From cdf3a2dc1385debf50096d54d4abf838c6cad4f7 Mon Sep 17 00:00:00 2001
From: Xavier Lau <x@acg.box>
Date: Mon, 30 Dec 2024 06:52:03 +0800
Subject: [PATCH 001/116] Migrate inclusion benchmark to v2 (#6368)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Migrate inclusion benchmark to v2.

---

Polkadot address: 156HGo9setPcU2qhFMVWLkcmtCEGySLwNqa3DaEiYSWtte4Y

---------

Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 .../parachains/src/inclusion/benchmarking.rs  | 63 ++++++++++---------
 prdoc/pr_6368.prdoc                           |  7 +++
 2 files changed, 41 insertions(+), 29 deletions(-)
 create mode 100644 prdoc/pr_6368.prdoc

diff --git a/polkadot/runtime/parachains/src/inclusion/benchmarking.rs b/polkadot/runtime/parachains/src/inclusion/benchmarking.rs
index 1dac3c92cf1..ab95c5c2366 100644
--- a/polkadot/runtime/parachains/src/inclusion/benchmarking.rs
+++ b/polkadot/runtime/parachains/src/inclusion/benchmarking.rs
@@ -14,6 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
+use bitvec::{bitvec, prelude::Lsb0};
+use frame_benchmarking::v2::*;
+use pallet_message_queue as mq;
+use polkadot_primitives::{
+	vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateCommitments,
+	HrmpChannelId, OutboundHrmpMessage, SessionIndex,
+};
+
 use super::*;
 use crate::{
 	builder::generate_validator_pairs,
@@ -21,13 +29,6 @@ use crate::{
 	hrmp::{HrmpChannel, HrmpChannels},
 	initializer, HeadData, ValidationCode,
 };
-use bitvec::{bitvec, prelude::Lsb0};
-use frame_benchmarking::benchmarks;
-use pallet_message_queue as mq;
-use polkadot_primitives::{
-	vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CandidateCommitments,
-	HrmpChannelId, OutboundHrmpMessage, SessionIndex,
-};
 
 fn create_candidate_commitments<T: crate::hrmp::pallet::Config>(
 	para_id: ParaId,
@@ -70,7 +71,7 @@ fn create_candidate_commitments<T: crate::hrmp::pallet::Config>(
 		BoundedVec::truncate_from(unbounded)
 	};
 
-	let new_validation_code = code_upgrade.then_some(ValidationCode(vec![42u8; 1024]));
+	let new_validation_code = code_upgrade.then_some(ValidationCode(vec![42_u8; 1024]));
 
 	CandidateCommitments::<u32> {
 		upward_messages,
@@ -87,18 +88,13 @@ fn create_messages(msg_len: usize, n_msgs: usize) -> Vec<Vec<u8>> {
 	vec![vec![best_number; msg_len]; n_msgs]
 }
 
-benchmarks! {
-	where_clause {
-		where
-			T: mq::Config + configuration::Config + initializer::Config,
-	}
-
-	enact_candidate {
-		let u in 0 .. 2;
-		let h in 0 .. 2;
-		let c in 0 .. 1;
+#[benchmarks(where T: mq::Config + configuration::Config + initializer::Config)]
+mod benchmarks {
+	use super::*;
 
-		let para = 42_u32.into();	// not especially important.
+	#[benchmark]
+	fn enact_candidate(u: Linear<0, 2>, h: Linear<0, 2>, c: Linear<0, 1>) {
+		let para = 42_u32.into(); // not especially important.
 
 		let max_len = mq::MaxMessageLenOf::<T>::get() as usize;
 
@@ -106,7 +102,7 @@ benchmarks! {
 		let n_validators = config.max_validators.unwrap_or(500);
 		let validators = generate_validator_pairs::<T>(n_validators);
 
-		let session = SessionIndex::from(0u32);
+		let session = SessionIndex::from(0_u32);
 		initializer::Pallet::<T>::test_trigger_on_new_session(
 			false,
 			session,
@@ -116,7 +112,7 @@ benchmarks! {
 		let backing_group_size = config.scheduler_params.max_validators_per_core.unwrap_or(5);
 		let head_data = HeadData(vec![0xFF; 1024]);
 
-		let relay_parent_number = BlockNumberFor::<T>::from(10u32);
+		let relay_parent_number = BlockNumberFor::<T>::from(10_u32);
 		let commitments = create_candidate_commitments::<T>(para, head_data, max_len, u, h, c != 0);
 		let backers = bitvec![u8, Lsb0; 1; backing_group_size as usize];
 		let availability_votes = bitvec![u8, Lsb0; 1; n_validators as usize];
@@ -135,17 +131,26 @@ benchmarks! {
 			ValidationCode(vec![1, 2, 3]).hash(),
 		);
 
-		let receipt = CommittedCandidateReceipt::<T::Hash> {
-			descriptor,
-			commitments,
-		};
+		let receipt = CommittedCandidateReceipt::<T::Hash> { descriptor, commitments };
 
-		Pallet::<T>::receive_upward_messages(para, vec![vec![0; max_len]; 1].as_slice());
-	} : { Pallet::<T>::enact_candidate(relay_parent_number, receipt, backers, availability_votes, core_index, backing_group) }
+		Pallet::<T>::receive_upward_messages(para, &vec![vec![0; max_len]; 1]);
 
-	impl_benchmark_test_suite!(
+		#[block]
+		{
+			Pallet::<T>::enact_candidate(
+				relay_parent_number,
+				receipt,
+				backers,
+				availability_votes,
+				core_index,
+				backing_group,
+			);
+		}
+	}
+
+	impl_benchmark_test_suite! {
 		Pallet,
 		crate::mock::new_test_ext(Default::default()),
 		crate::mock::Test
-	);
+	}
 }
diff --git a/prdoc/pr_6368.prdoc b/prdoc/pr_6368.prdoc
new file mode 100644
index 00000000000..4fd3963eb05
--- /dev/null
+++ b/prdoc/pr_6368.prdoc
@@ -0,0 +1,7 @@
+title: Migrate inclusion benchmark to v2
+doc:
+- audience: Runtime Dev
+  description: Migrate inclusion benchmark to v2.
+crates:
+- name: polkadot-runtime-parachains
+  bump: patch
-- 
GitLab


From f19640bdf98f72c788e60f647628b3fc98192bb1 Mon Sep 17 00:00:00 2001
From: Dmitry Markin <dmitry@markin.tech>
Date: Mon, 30 Dec 2024 10:44:47 +0200
Subject: [PATCH 002/116] Log peerset set ID -> protocol name mapping (#7005)

To simplify debugging of peerset related issues like
https://github.com/paritytech/polkadot-sdk/issues/6573#issuecomment-2563091343.

---------

Co-authored-by: command-bot <>
---
 prdoc/pr_7005.prdoc                      |  7 +++++++
 substrate/client/network/src/protocol.rs | 11 +++++++++--
 2 files changed, 16 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_7005.prdoc

diff --git a/prdoc/pr_7005.prdoc b/prdoc/pr_7005.prdoc
new file mode 100644
index 00000000000..a61f7c5b9b7
--- /dev/null
+++ b/prdoc/pr_7005.prdoc
@@ -0,0 +1,7 @@
+title: Log peerset set ID -> protocol name mapping
+doc:
+- audience: Node Dev
+  description: To simplify debugging of peerset related issues like https://github.com/paritytech/polkadot-sdk/issues/6573#issuecomment-2563091343.
+crates:
+- name: sc-network
+  bump: patch
diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs
index 6da1d601b34..81e1848adef 100644
--- a/substrate/client/network/src/protocol.rs
+++ b/substrate/client/network/src/protocol.rs
@@ -34,7 +34,7 @@ use libp2p::{
 	},
 	Multiaddr, PeerId,
 };
-use log::warn;
+use log::{debug, warn};
 
 use codec::DecodeAll;
 use sc_network_common::role::Roles;
@@ -53,6 +53,9 @@ mod notifications;
 
 pub mod message;
 
+// Log target for this file.
+const LOG_TARGET: &str = "sub-libp2p";
+
 /// Maximum size used for notifications in the block announce and transaction protocols.
 // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`.
 pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = MAX_RESPONSE_SIZE;
@@ -124,6 +127,10 @@ impl<B: BlockT> Protocol<B> {
 				handle.set_metrics(notification_metrics.clone());
 			});
 
+			protocol_configs.iter().enumerate().for_each(|(i, (p, _, _))| {
+				debug!(target: LOG_TARGET, "Notifications protocol {:?}: {}", SetId::from(i), p.name);
+			});
+
 			(
 				Notifications::new(
 					protocol_controller_handles,
@@ -164,7 +171,7 @@ impl<B: BlockT> Protocol<B> {
 		{
 			self.behaviour.disconnect_peer(peer_id, SetId::from(position));
 		} else {
-			warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name")
+			warn!(target: LOG_TARGET, "disconnect_peer() with invalid protocol name")
 		}
 	}
 
-- 
GitLab


From 997db8e2035ce180f502ccb54eb06ab464d95dab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?= <git@kchr.de>
Date: Mon, 30 Dec 2024 11:34:31 +0100
Subject: [PATCH 003/116] pallet-bounties: Fix benchmarks for 0 ED (#7013)

Closes: https://github.com/paritytech/polkadot-sdk/issues/7009

---------

Co-authored-by: command-bot <>
---
 prdoc/pr_7013.prdoc                          |  7 ++++++
 substrate/frame/bounties/src/benchmarking.rs | 24 ++++++++++++--------
 substrate/frame/bounties/src/lib.rs          |  1 +
 3 files changed, 23 insertions(+), 9 deletions(-)
 create mode 100644 prdoc/pr_7013.prdoc

diff --git a/prdoc/pr_7013.prdoc b/prdoc/pr_7013.prdoc
new file mode 100644
index 00000000000..138fa7f2310
--- /dev/null
+++ b/prdoc/pr_7013.prdoc
@@ -0,0 +1,7 @@
+title: 'pallet-bounties: Fix benchmarks for 0 ED'
+doc:
+- audience: Runtime Dev
+  description: 'Closes: https://github.com/paritytech/polkadot-sdk/issues/7009'
+crates:
+- name: pallet-bounties
+  bump: patch
diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs
index 1e931958898..b5155909e3c 100644
--- a/substrate/frame/bounties/src/benchmarking.rs
+++ b/substrate/frame/bounties/src/benchmarking.rs
@@ -15,9 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! bounties pallet benchmarking.
-
-#![cfg(feature = "runtime-benchmarks")]
+//! Bounties pallet benchmarking.
 
 use super::*;
 
@@ -37,6 +35,16 @@ fn set_block_number<T: Config<I>, I: 'static>(n: BlockNumberFor<T, I>) {
 	<T as pallet_treasury::Config<I>>::BlockNumberProvider::set_block_number(n);
 }
 
+fn minimum_balance<T: Config<I>, I: 'static>() -> BalanceOf<T, I> {
+	let minimum_balance = T::Currency::minimum_balance();
+
+	if minimum_balance.is_zero() {
+		1u32.into()
+	} else {
+		minimum_balance
+	}
+}
+
 // Create bounties that are approved for use in `on_initialize`.
 fn create_approved_bounties<T: Config<I>, I: 'static>(n: u32) -> Result<(), BenchmarkError> {
 	for i in 0..n {
@@ -62,12 +70,10 @@ fn setup_bounty<T: Config<I>, I: 'static>(
 	let fee = value / 2u32.into();
 	let deposit = T::BountyDepositBase::get() +
 		T::DataDepositPerByte::get() * T::MaximumReasonLength::get().into();
-	let _ = T::Currency::make_free_balance_be(&caller, deposit + T::Currency::minimum_balance());
+	let _ = T::Currency::make_free_balance_be(&caller, deposit + minimum_balance::<T, I>());
 	let curator = account("curator", u, SEED);
-	let _ = T::Currency::make_free_balance_be(
-		&curator,
-		fee / 2u32.into() + T::Currency::minimum_balance(),
-	);
+	let _ =
+		T::Currency::make_free_balance_be(&curator, fee / 2u32.into() + minimum_balance::<T, I>());
 	let reason = vec![0; d as usize];
 	(caller, curator, fee, value, reason)
 }
@@ -91,7 +97,7 @@ fn create_bounty<T: Config<I>, I: 'static>(
 
 fn setup_pot_account<T: Config<I>, I: 'static>() {
 	let pot_account = Bounties::<T, I>::account_id();
-	let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000u32.into());
+	let value = minimum_balance::<T, I>().saturating_mul(1_000_000_000u32.into());
 	let _ = T::Currency::make_free_balance_be(&pot_account, value);
 }
 
diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs
index 729c76b5cc7..d9accc5061c 100644
--- a/substrate/frame/bounties/src/lib.rs
+++ b/substrate/frame/bounties/src/lib.rs
@@ -84,6 +84,7 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
+#[cfg(feature = "runtime-benchmarks")]
 mod benchmarking;
 pub mod migrations;
 mod tests;
-- 
GitLab


From b63555510b0c9750b24f6bea9c24ef031404e643 Mon Sep 17 00:00:00 2001
From: Maciej <maciej.zyszkiewicz@parity.io>
Date: Mon, 30 Dec 2024 11:07:53 +0000
Subject: [PATCH 004/116] Excluding chainlink domain for link checker CI
 (#6524)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Excludes the chainlink domain through a lychee config to fix the
complaining link checker CI test. Chainlink is gated behind a captcha.

---------

Co-authored-by: Bastian Köcher <git@kchr.de>
Co-authored-by: command-bot <>
---
 .config/lychee.toml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.config/lychee.toml b/.config/lychee.toml
index b1f08de3334..58f8d068d9d 100644
--- a/.config/lychee.toml
+++ b/.config/lychee.toml
@@ -28,7 +28,7 @@ exclude = [
 	"http://visitme/",
 	"https://visitme/",
 
-	# TODO <https://github.com/paritytech/polkadot-sdk/issues/134>
+	# TODO meta issue: <https://github.com/paritytech/polkadot-sdk/issues/134>
 	"https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs",
 	"https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html",
 	"https://github.com/ipfs/js-ipfs-bitswap/blob/",
@@ -50,8 +50,10 @@ exclude = [
 	"https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html",
 
 	# Behind a captcha (code 403):
+	"https://chainlist.org/chain/*",
 	"https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/",
 	"https://www.reddit.com/r/rust/comments/3spfh1/does_collect_allocate_more_than_once_while/",
+
 	# 403 rate limited:
 	"https://etherscan.io/block/11090290",
 	"https://subscan.io/",
-- 
GitLab


From b4177a9fe173ae592ccf581d290e869aff1cafc4 Mon Sep 17 00:00:00 2001
From: Dmitry Markin <dmitry@markin.tech>
Date: Mon, 30 Dec 2024 16:28:15 +0200
Subject: [PATCH 005/116] sync: Send already connected peers to new subscribers
 (#7011)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Introduce `SyncEvent::InitialPeers` message sent to new subscribers to
allow them to correctly track sync peers. This resolves a race condition
described in
https://github.com/paritytech/polkadot-sdk/issues/6573#issuecomment-2563091343.

Fixes https://github.com/paritytech/polkadot-sdk/issues/6573.

---------

Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 prdoc/pr_7011.prdoc                           | 16 +++++++++++++
 substrate/client/network-gossip/src/bridge.rs | 10 ++++----
 substrate/client/network-gossip/src/lib.rs    | 13 +++++++----
 substrate/client/network/statement/src/lib.rs | 23 ++++++++++++++-----
 substrate/client/network/sync/src/engine.rs   |  6 ++++-
 substrate/client/network/sync/src/types.rs    |  4 ++++
 .../client/network/transactions/src/lib.rs    | 23 ++++++++++++++-----
 7 files changed, 73 insertions(+), 22 deletions(-)
 create mode 100644 prdoc/pr_7011.prdoc

diff --git a/prdoc/pr_7011.prdoc b/prdoc/pr_7011.prdoc
new file mode 100644
index 00000000000..55fe0c73ca0
--- /dev/null
+++ b/prdoc/pr_7011.prdoc
@@ -0,0 +1,16 @@
+title: 'sync: Send already connected peers to new subscribers'
+doc:
+- audience: Node Dev
+  description: |-
+    Introduce `SyncEvent::InitialPeers` message sent to new subscribers to allow them correctly tracking sync peers. This resolves a race condition described in https://github.com/paritytech/polkadot-sdk/issues/6573#issuecomment-2563091343.
+
+    Fixes https://github.com/paritytech/polkadot-sdk/issues/6573.
+crates:
+- name: sc-network-gossip
+  bump: major
+- name: sc-network-statement
+  bump: patch
+- name: sc-network-sync
+  bump: major
+- name: sc-network-transactions
+  bump: patch
diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs
index 2daf1e49ee4..bff258a9a01 100644
--- a/substrate/client/network-gossip/src/bridge.rs
+++ b/substrate/client/network-gossip/src/bridge.rs
@@ -254,10 +254,12 @@ impl<B: BlockT> Future for GossipEngine<B> {
 
 					match sync_event_stream {
 						Poll::Ready(Some(event)) => match event {
-							SyncEvent::PeerConnected(remote) =>
-								this.network.add_set_reserved(remote, this.protocol.clone()),
-							SyncEvent::PeerDisconnected(remote) =>
-								this.network.remove_set_reserved(remote, this.protocol.clone()),
+							SyncEvent::InitialPeers(peer_ids) =>
+								this.network.add_set_reserved(peer_ids, this.protocol.clone()),
+							SyncEvent::PeerConnected(peer_id) =>
+								this.network.add_set_reserved(vec![peer_id], this.protocol.clone()),
+							SyncEvent::PeerDisconnected(peer_id) =>
+								this.network.remove_set_reserved(peer_id, this.protocol.clone()),
 						},
 						// The sync event stream closed. Do the same for [`GossipValidator`].
 						Poll::Ready(None) => {
diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs
index 20d9922200c..2ec573bf9e3 100644
--- a/substrate/client/network-gossip/src/lib.rs
+++ b/substrate/client/network-gossip/src/lib.rs
@@ -82,15 +82,18 @@ mod validator;
 
 /// Abstraction over a network.
 pub trait Network<B: BlockT>: NetworkPeers + NetworkEventStream {
-	fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) {
-		let addr = Multiaddr::empty().with(Protocol::P2p(*who.as_ref()));
-		let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect());
+	fn add_set_reserved(&self, peer_ids: Vec<PeerId>, protocol: ProtocolName) {
+		let addrs = peer_ids
+			.into_iter()
+			.map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into())))
+			.collect();
+		let result = self.add_peers_to_reserved_set(protocol, addrs);
 		if let Err(err) = result {
 			log::error!(target: "gossip", "add_set_reserved failed: {}", err);
 		}
 	}
-	fn remove_set_reserved(&self, who: PeerId, protocol: ProtocolName) {
-		let result = self.remove_peers_from_reserved_set(protocol, iter::once(who).collect());
+	fn remove_set_reserved(&self, peer_id: PeerId, protocol: ProtocolName) {
+		let result = self.remove_peers_from_reserved_set(protocol, iter::once(peer_id).collect());
 		if let Err(err) = result {
 			log::error!(target: "gossip", "remove_set_reserved failed: {}", err);
 		}
diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs
index df93788696e..586a15cadd6 100644
--- a/substrate/client/network/statement/src/lib.rs
+++ b/substrate/client/network/statement/src/lib.rs
@@ -33,7 +33,8 @@ use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt}
 use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
 use sc_network::{
 	config::{NonReservedPeerMode, SetConfig},
-	error, multiaddr,
+	error,
+	multiaddr::{Multiaddr, Protocol},
 	peer_store::PeerStoreProvider,
 	service::{
 		traits::{NotificationEvent, NotificationService, ValidationResult},
@@ -296,9 +297,19 @@ where
 
 	fn handle_sync_event(&mut self, event: SyncEvent) {
 		match event {
-			SyncEvent::PeerConnected(remote) => {
-				let addr = iter::once(multiaddr::Protocol::P2p(remote.into()))
-					.collect::<multiaddr::Multiaddr>();
+			SyncEvent::InitialPeers(peer_ids) => {
+				let addrs = peer_ids
+					.into_iter()
+					.map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into())))
+					.collect();
+				let result =
+					self.network.add_peers_to_reserved_set(self.protocol_name.clone(), addrs);
+				if let Err(err) = result {
+					log::error!(target: LOG_TARGET, "Add reserved peers failed: {}", err);
+				}
+			},
+			SyncEvent::PeerConnected(peer_id) => {
+				let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into()));
 				let result = self.network.add_peers_to_reserved_set(
 					self.protocol_name.clone(),
 					iter::once(addr).collect(),
@@ -307,10 +318,10 @@ where
 					log::error!(target: LOG_TARGET, "Add reserved peer failed: {}", err);
 				}
 			},
-			SyncEvent::PeerDisconnected(remote) => {
+			SyncEvent::PeerDisconnected(peer_id) => {
 				let result = self.network.remove_peers_from_reserved_set(
 					self.protocol_name.clone(),
-					iter::once(remote).collect(),
+					iter::once(peer_id).collect(),
 				);
 				if let Err(err) = result {
 					log::error!(target: LOG_TARGET, "Failed to remove reserved peer: {err}");
diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
index 0c39ea0b93c..4003361525e 100644
--- a/substrate/client/network/sync/src/engine.rs
+++ b/substrate/client/network/sync/src/engine.rs
@@ -656,7 +656,11 @@ where
 			ToServiceCommand::SetSyncForkRequest(peers, hash, number) => {
 				self.strategy.set_sync_fork_request(peers, &hash, number);
 			},
-			ToServiceCommand::EventStream(tx) => self.event_streams.push(tx),
+			ToServiceCommand::EventStream(tx) => {
+				let _ = tx
+					.unbounded_send(SyncEvent::InitialPeers(self.peers.keys().cloned().collect()));
+				self.event_streams.push(tx);
+			},
 			ToServiceCommand::RequestJustification(hash, number) =>
 				self.strategy.request_justification(&hash, number),
 			ToServiceCommand::ClearJustificationRequests =>
diff --git a/substrate/client/network/sync/src/types.rs b/substrate/client/network/sync/src/types.rs
index 5745a34378d..a72a2f7c1ff 100644
--- a/substrate/client/network/sync/src/types.rs
+++ b/substrate/client/network/sync/src/types.rs
@@ -127,6 +127,10 @@ where
 
 /// Syncing-related events that other protocols can subscribe to.
 pub enum SyncEvent {
+	/// All connected peers that the syncing implementation is tracking.
+	/// Always sent as the first message to the stream.
+	InitialPeers(Vec<PeerId>),
+
 	/// Peer that the syncing implementation is tracking connected.
 	PeerConnected(PeerId),
 
diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs
index 44fa702ef6d..49f429a04ee 100644
--- a/substrate/client/network/transactions/src/lib.rs
+++ b/substrate/client/network/transactions/src/lib.rs
@@ -35,7 +35,8 @@ use log::{debug, trace, warn};
 use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
 use sc_network::{
 	config::{NonReservedPeerMode, ProtocolId, SetConfig},
-	error, multiaddr,
+	error,
+	multiaddr::{Multiaddr, Protocol},
 	peer_store::PeerStoreProvider,
 	service::{
 		traits::{NotificationEvent, NotificationService, ValidationResult},
@@ -377,9 +378,19 @@ where
 
 	fn handle_sync_event(&mut self, event: SyncEvent) {
 		match event {
-			SyncEvent::PeerConnected(remote) => {
-				let addr = iter::once(multiaddr::Protocol::P2p(remote.into()))
-					.collect::<multiaddr::Multiaddr>();
+			SyncEvent::InitialPeers(peer_ids) => {
+				let addrs = peer_ids
+					.into_iter()
+					.map(|peer_id| Multiaddr::empty().with(Protocol::P2p(peer_id.into())))
+					.collect();
+				let result =
+					self.network.add_peers_to_reserved_set(self.protocol_name.clone(), addrs);
+				if let Err(err) = result {
+					log::error!(target: LOG_TARGET, "Add reserved peers failed: {}", err);
+				}
+			},
+			SyncEvent::PeerConnected(peer_id) => {
+				let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into()));
 				let result = self.network.add_peers_to_reserved_set(
 					self.protocol_name.clone(),
 					iter::once(addr).collect(),
@@ -388,10 +399,10 @@ where
 					log::error!(target: LOG_TARGET, "Add reserved peer failed: {}", err);
 				}
 			},
-			SyncEvent::PeerDisconnected(remote) => {
+			SyncEvent::PeerDisconnected(peer_id) => {
 				let result = self.network.remove_peers_from_reserved_set(
 					self.protocol_name.clone(),
-					iter::once(remote).collect(),
+					iter::once(peer_id).collect(),
 				);
 				if let Err(err) = result {
 					log::error!(target: LOG_TARGET, "Remove reserved peer failed: {}", err);
-- 
GitLab


From 5abdc5c34c544aaf21d98778eae31ccc2349c422 Mon Sep 17 00:00:00 2001
From: SihanoukSolver29 <150921296+SihanoukSolver29@users.noreply.github.com>
Date: Mon, 30 Dec 2024 15:44:28 +0000
Subject: [PATCH 006/116] correct path in cumulus README (#7001)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Description

This PR fixes the file path in `cumulus/README.md` so the link to the
container documentation points to the correct location.

## Review Notes

The only changes are the links in `cumulus/README.md` from
`./docs/contributor/container.md` to `../docs/contributor/container.md`.

# Checklist

* [x] My PR includes a detailed description as outlined in the
"Description" and its two subsections above.
* [x] My PR follows the [labeling requirements](

https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process
) of this project (at minimum one label for `T` required)
* External contributors: ask maintainers to put the right label on your
PR.
* [ ] I have made corresponding changes to the documentation (if
applicable)
* [ ] I have added tests that prove my fix is effective or that my
feature works (if applicable)

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 cumulus/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cumulus/README.md b/cumulus/README.md
index 0c47df99902..400f9481c3f 100644
--- a/cumulus/README.md
+++ b/cumulus/README.md
@@ -4,7 +4,7 @@
 
 This repository contains both the Cumulus SDK and also specific chains implemented on top of this SDK.
 
-If you only want to run a **Polkadot Parachain Node**, check out our [container section](./docs/contributor/container.md).
+If you only want to run a **Polkadot Parachain Node**, check out our [container section](../docs/contributor/container.md).
 
 ## Cumulus SDK
 
@@ -34,7 +34,7 @@ A Polkadot [collator](https://wiki.polkadot.network/docs/en/learn-collator) for
 `polkadot-parachain` binary (previously called `polkadot-collator`).
 
 You may run `polkadot-parachain` locally after building it or using one of the container option described
-[here](./docs/contributor/container.md).
+[here](../docs/contributor/container.md).
 
 ### Relay Chain Interaction
 To operate a parachain node, a connection to the corresponding relay chain is necessary. This can be achieved in one of
-- 
GitLab


From 9d760a9f569cf58bf6f6c19bac93d0d33f54a454 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Date: Thu, 2 Jan 2025 13:13:45 +0100
Subject: [PATCH 007/116] [CI] Skip SemVer on R0-silent and update docs (#6285)

Changes:
- Make R0-silent not run the semver check again. Originally I thought
this would be good to have a bullet-proof check, but it now often
triggers when CI or unrelated files are changed. In the end, the
developer has to make the right choice here - and always will need to.
So bringing back the R0 label gives more power to the devs and should
increase dev velocity. We still need to ensure that every use of this
label is well understood, and not just used out of laziness.
- Fix `/cmd prdoc` bump levels
- Update docs

---------

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
---
 .github/scripts/generate-prdoc.py   |  18 +++-
 .github/workflows/check-semver.yml  |  10 ++-
 .github/workflows/command-prdoc.yml |   2 +-
 docs/contributor/prdoc.md           | 124 ++++++++++++++++------------
 4 files changed, 97 insertions(+), 57 deletions(-)

diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py
index 780fa001297..9154f185e64 100644
--- a/.github/scripts/generate-prdoc.py
+++ b/.github/scripts/generate-prdoc.py
@@ -36,6 +36,21 @@ def from_pr_number(n, audience, bump, force):
 
 	create_prdoc(n, audience, pr.title, pr.body, patch, bump, force)
 
+def translate_audience(audience):
+	aliases = {
+		'runtime_dev': 'Runtime Dev',
+		'runtime_user': 'Runtime Operator',
+		'node_dev': 'Node Dev',
+		'node_user': 'Node User',
+	}
+
+	if audience in aliases:
+		to = aliases[audience]
+		print(f"Translated audience '{audience}' to '{to}'")
+		audience = to
+
+	return audience
+
 def create_prdoc(pr, audience, title, description, patch, bump, force):
 	path = f"prdoc/pr_{pr}.prdoc"
 
@@ -49,6 +64,7 @@ def create_prdoc(pr, audience, title, description, patch, bump, force):
 		print(f"No preexisting PrDoc for PR {pr}")
 
 	prdoc = { "title": title, "doc": [{}], "crates": [] }
+	audience = translate_audience(audience)
 
 	prdoc["doc"][0]["audience"] = audience
 	prdoc["doc"][0]["description"] = description
@@ -117,7 +133,7 @@ def setup_parser(parser=None, pr_required=True):
 		parser = argparse.ArgumentParser()
 	parser.add_argument("--pr", type=int, required=pr_required, help="The PR number to generate the PrDoc for.")
 	parser.add_argument("--audience", type=str, nargs='*', choices=allowed_audiences, default=["todo"], help="The audience of whom the changes may concern. Example: --audience runtime_dev node_dev")
-	parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "no_change"], help="A default bump level for all crates. Example: --bump patch")
+	parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "none"], help="A default bump level for all crates. Example: --bump patch")
 	parser.add_argument("--force", action="store_true", help="Whether to overwrite any existing PrDoc.")
 	return parser
 
diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml
index 16028c8de77..0da3e54ef60 100644
--- a/.github/workflows/check-semver.yml
+++ b/.github/workflows/check-semver.yml
@@ -2,7 +2,7 @@ name: Check semver
 
 on:
   pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
+    types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled]
   workflow_dispatch:
   merge_group:
 
@@ -62,21 +62,29 @@ jobs:
 
           echo "PRDOC_EXTRA_ARGS=--max-bump minor" >> $GITHUB_ENV
 
+      - name: Echo Skip
+        if: ${{ contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
+        run: echo "Skipping this PR because it is labeled as R0-silent."
+
       - name: Rust Cache
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
         with:
           cache-on-failure: true
 
       - name: Rust compilation prerequisites
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         run: |
           rustup default $TOOLCHAIN
           rustup component add rust-src --toolchain $TOOLCHAIN
 
       - name: install parity-publish
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         # Set the target dir to cache the build.
         run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.3 --locked -q
 
       - name: check semver
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         run: |
           if [ -z "$PR" ]; then
             echo "Skipping master/merge queue"
diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml
index 7022e8e0e00..71dbcfbd228 100644
--- a/.github/workflows/command-prdoc.yml
+++ b/.github/workflows/command-prdoc.yml
@@ -14,7 +14,7 @@ on:
         required: true
         options:
           - "TODO"
-          - "no_change"
+          - "none"
           - "patch"
           - "minor"
           - "major"
diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md
index 4a1a3c1f068..1f6252425e6 100644
--- a/docs/contributor/prdoc.md
+++ b/docs/contributor/prdoc.md
@@ -1,73 +1,88 @@
 # PRDoc
 
-A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use this approach to
-record changes on a crate level. This information is then processed by the release team to apply the correct crate
-version bumps and to generate the CHANGELOG of the next release.
+A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use
+this approach to record changes on a crate level. This information is then processed by the release
+team to apply the correct crate version bumps and to generate the CHANGELOG of the next release.
 
 ## Requirements
 
-When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to contain a prdoc. The
-`R0` label should only be placed for No-OP changes like correcting a typo in a comment or CI stuff. If unsure, ping
-the [CODEOWNERS](../../.github/CODEOWNERS) for advice.
+When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to
+contain a prdoc. The `R0` label should only be placed for No-OP changes like correcting a typo in a
+comment or CI stuff. If unsure, ping the [CODEOWNERS](../../.github/CODEOWNERS) for advice.
 
-## PRDoc How-To
+## Auto Generation
 
-A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one:
-
-1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install parity-prdoc`.
-1. Open a Pull Request and get the PR number.
-1. Generate the file with `prdoc generate <PR_NUMBER>`. The output filename will be printed.
-1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
-   [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
-1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and [SemVer](#record-semver-changes) sections.
-1. Check your prdoc with `prdoc check -n <PR_NUMBER>`. This is optional since the CI will also check it.
-
-> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct file:  
-> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`
-
-Alternatively you can call the prdoc from PR via `/cmd prdoc` (see args with `/cmd prdoc --help`)
-in a comment to PR to trigger it from CI.
+You can create a PrDoc by using the `/cmd prdoc` command (see args with `/cmd prdoc --help`) in a
+comment on your PR.
 
 Options:
 
-- `pr`: The PR number to generate the PrDoc for.
-- `audience`: The audience of whom the changes may concern.
-- `bump`: A default bump level for all crates.
-  The PrDoc will likely need to be edited to reflect the actual changes after generation.
-- `force`: Whether to overwrite any existing PrDoc.
+- `audience`: The audience of whom the changes may concern.
+  - `runtime_dev`: Anyone building a runtime themselves. For example parachain teams, or people
+    providing template runtimes. Also devs using pallets, FRAME etc directly. These are people who
+    care about the protocol (WASM), not the meta-protocol (client).
+  - `runtime_user`: Anyone using the runtime. Can be front-end devs reading the state, exchanges
+    listening for events, libraries that have hard-coded pallet indices etc. Anything that would
+    result in an observable change to the runtime behaviour must be marked with this.
+  - `node_dev`: Those who build around the client side code. Alternative client builders, SMOLDOT,
+    those who consume RPCs. These are people who are oblivious to the runtime changes. They only
+    care about the meta-protocol, not the protocol itself.
+  - `node_operator`: People who run the node. Think of validators, exchanges, indexer services, CI
+    actions. Anything that modifies how the binary behaves (its arguments, default arguments, error
+    messages, etc) must be marked with this.
+- `bump`: The default bump level for all crates. The PrDoc will likely need to be edited to reflect
+  the actual changes after generation. More details in the section below.
+  - `none`: There is no observable change. So to say: if someone were handed the old and the new
+    version of our software, it would be impossible to figure out what version is which.
+  - `patch`: Fixes that will never cause compilation errors if someone updates to this version. No
+    functionality has been changed. Should be limited to fixing bugs or No-OP implementation
+    changes.
+  - `minor`: Additions that will never cause compilation errors if someone updates to this version.
+    No functionality has been changed. Should be limited to adding new features.
+  - `major`: Anything goes.
+- `force: true|false`: Whether to overwrite any existing PrDoc file.
 
-## Pick An Audience
-
-While describing a PR, the author needs to consider which audience(s) need to be addressed.
-The list of valid audiences is described and documented in the JSON schema as follow:
+### Example
 
-- `Node Dev`: Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs.
-  These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol
-  itself.
+```bash
+/cmd prdoc --audience runtime_dev --bump patch
+```
 
-- `Runtime Dev`: All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a
-  pallet. These are people who care about the protocol (WASM), not the meta-protocol (client).
+## Local Generation
 
-- `Node Operator`: Those who don't write any code and only run code.
+A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps
+to generate one:
 
-- `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain.
+1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install
+   parity-prdoc`.
+1. Open a Pull Request and get the PR number.
+1. Generate the file with `prdoc generate <PR_NUMBER>`. The output filename will be printed.
+1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
+   [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
+1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and
+   [SemVer](#record-semver-changes) sections.
+1. Check your prdoc with `prdoc check -n <PR_NUMBER>`. This is optional since the CI will also check
+   it.
 
-If you have a change that affects multiple audiences, you can either list them all, or write multiple sections and
-re-phrase the changes for each audience.
+> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct
+> file:  
+> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`
 
 ## Record SemVer Changes
 
-All published crates that got modified need to have an entry in the `crates` section of your `PRDoc`. This entry tells
-the release team how to bump the crate version prior to the next release. It is very important that this information is
-correct, otherwise it could break the code of downstream teams.
+All published crates that got modified need to have an entry in the `crates` section of your
+`PRDoc`. This entry tells the release team how to bump the crate version prior to the next release.
+It is very important that this information is correct, otherwise it could break the code of
+downstream teams.
 
 The bump can either be `major`, `minor`, `patch` or `none`. The three first options are defined by
-[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `None` should be picked if no other
-applies. The `None` option is equivalent to the `R0-silent` label, but on a crate level. Experimental and private APIs
-are exempt from bumping and can be broken at any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc
-about them.
+[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `none` should be
+picked if no other applies. The `none` option is equivalent to the `R0-silent` label, but on a crate
+level. Experimental and private APIs are exempt from bumping and can be broken at any time. Please
+read the [Crate Section](../RELEASE.md) of the RELEASE doc about them.
 
-> **Note**: There is currently no CI in place to sanity check this information, but should be added soon.
+> **Note**: There is currently no CI in place to sanity check this information, but it should be added
+> soon.
 
 ### Example
 
@@ -81,12 +96,13 @@ crates:
     bump: minor
 ```
 
-It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, while code using
-`frame-example` might break.
+It means that downstream code using `frame-example-pallet` is still guaranteed to work as before,
+while code using `frame-example` might break.
 
 ### Dependencies
 
-A crate that depends on another crate will automatically inherit its `major` bumps. This means that you do not need to
-bump a crate that had a SemVer breaking change only from re-exporting another crate with a breaking change.  
-`minor` an `patch` bumps do not need to be inherited, since `cargo` will automatically update them to the latest
-compatible version.
+A crate that depends on another crate will automatically inherit its `major` bumps. This means that
+you do not need to bump a crate that had a SemVer breaking change only from re-exporting another
+crate with a breaking change.  
+`minor` and `patch` bumps do not need to be inherited, since `cargo` will automatically update them
+to the latest compatible version.
-- 
GitLab


From fcbc0ef2d109c9c96c6821959c9899a3d3dd20a1 Mon Sep 17 00:00:00 2001
From: Andrei Eres <eresav@me.com>
Date: Thu, 2 Jan 2025 17:54:03 +0100
Subject: [PATCH 008/116] Add workflow for networking benchmarks (#7029)

# Description

Adds charts for networking benchmarks
---
 .github/workflows/networking-benchmarks.yml | 107 ++++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 .github/workflows/networking-benchmarks.yml

diff --git a/.github/workflows/networking-benchmarks.yml b/.github/workflows/networking-benchmarks.yml
new file mode 100644
index 00000000000..e45ae601105
--- /dev/null
+++ b/.github/workflows/networking-benchmarks.yml
@@ -0,0 +1,107 @@
+name: Networking Benchmarks
+
+on:
+  push:
+    branches:
+      - master
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  preflight:
+    uses: ./.github/workflows/reusable-preflight.yml
+
+  build:
+    timeout-minutes: 80
+    needs: [preflight]
+    runs-on: ${{ needs.preflight.outputs.RUNNER_BENCHMARK }}
+    container:
+      image: ${{ needs.preflight.outputs.IMAGE }}
+    strategy:
+      fail-fast: false
+      matrix:
+        features:
+          [
+            {
+              bench: "notifications_protocol",
+            },
+            {
+              bench: "request_response_protocol",
+            },
+          ]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Run Benchmarks
+        id: run-benchmarks
+        run: |
+          mkdir -p ./charts
+          forklift cargo bench -p sc-network --bench ${{ matrix.features.bench }} -- --output-format bencher | grep "^test" | tee ./charts/networking-bench.txt || echo "Benchmarks failed"
+          ls -lsa ./charts
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4.3.6
+        with:
+          name: ${{ matrix.features.bench }}-${{ github.sha }}
+          path: ./charts
+
+  publish-benchmarks:
+    timeout-minutes: 60
+    needs: [build]
+    if: github.ref == 'refs/heads/master'
+    environment: subsystem-benchmarks
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: gh-pages
+          fetch-depth: 0
+
+      - run: git checkout master --
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v4.1.8
+        with:
+          pattern: "*-${{ github.sha }}"
+          path: ./charts
+
+      - name: Setup git
+        run: |
+          # Fixes "detected dubious ownership" error in the ci
+          git config --global --add safe.directory '*'
+          ls -lsR ./charts
+
+      - uses: actions/create-github-app-token@v1
+        id: app-token
+        with:
+          app-id: ${{ secrets.POLKADOTSDK_GHPAGES_APP_ID }}
+          private-key: ${{ secrets.POLKADOTSDK_GHPAGES_APP_KEY }}
+
+      - name: Generate ${{ env.BENCH }}
+        env:
+          BENCH: notifications_protocol
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          tool: "cargo"
+          output-file-path: ./charts/${{ env.BENCH }}-${{ github.sha }}/networking-bench.txt
+          benchmark-data-dir-path: ./bench/${{ env.BENCH }}
+          github-token: ${{ steps.app-token.outputs.token }}
+          auto-push: true
+
+      - name: Generate ${{ env.BENCH }}
+        env:
+          BENCH: request_response_protocol
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          tool: "cargo"
+          output-file-path: ./charts/${{ env.BENCH }}-${{ github.sha }}/networking-bench.txt
+          benchmark-data-dir-path: ./bench/${{ env.BENCH }}
+          github-token: ${{ steps.app-token.outputs.token }}
+          auto-push: true
-- 
GitLab


From 20513d6fec617acf783fef8db872beb0584b6a9b Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Thu, 2 Jan 2025 19:36:45 +0100
Subject: [PATCH 009/116] [pallet-revive] fix file case (#6981)

fix https://github.com/paritytech/polkadot-sdk/issues/6970

---------

Co-authored-by: command-bot <>
---
 prdoc/pr_6981.prdoc                           |   7 ++
 .../js/abi/{ErrorTester.json => Errors.json}  |   0
 .../js/abi/{ErrorTester.ts => Errors.ts}      |   2 +-
 .../revive/rpc/examples/js/abi/errorTester.ts | 106 ------------------
 .../contracts/{ErrorTester.sol => Errors.sol} |   2 +-
 .../{ErrorTester.polkavm => Errors.polkavm}   | Bin
 .../rpc/examples/js/src/geth-diff.test.ts     |  54 ++++-----
 substrate/frame/revive/rpc/src/tests.rs       |   2 +-
 8 files changed, 37 insertions(+), 136 deletions(-)
 create mode 100644 prdoc/pr_6981.prdoc
 rename substrate/frame/revive/rpc/examples/js/abi/{ErrorTester.json => Errors.json} (100%)
 rename substrate/frame/revive/rpc/examples/js/abi/{ErrorTester.ts => Errors.ts} (98%)
 delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/errorTester.ts
 rename substrate/frame/revive/rpc/examples/js/contracts/{ErrorTester.sol => Errors.sol} (98%)
 rename substrate/frame/revive/rpc/examples/js/pvm/{ErrorTester.polkavm => Errors.polkavm} (100%)

diff --git a/prdoc/pr_6981.prdoc b/prdoc/pr_6981.prdoc
new file mode 100644
index 00000000000..8ed70e51ef4
--- /dev/null
+++ b/prdoc/pr_6981.prdoc
@@ -0,0 +1,7 @@
+title: '[pallet-revive] fix file case'
+doc:
+- audience: Runtime Dev
+  description: "fix https://github.com/paritytech/polkadot-sdk/issues/6970\r\n"
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json b/substrate/frame/revive/rpc/examples/js/abi/Errors.json
similarity index 100%
rename from substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json
rename to substrate/frame/revive/rpc/examples/js/abi/Errors.json
diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/Errors.ts
similarity index 98%
rename from substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts
rename to substrate/frame/revive/rpc/examples/js/abi/Errors.ts
index f3776e498fd..b39567531c6 100644
--- a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts
+++ b/substrate/frame/revive/rpc/examples/js/abi/Errors.ts
@@ -1,4 +1,4 @@
-export const ErrorTesterAbi = [
+export const ErrorsAbi = [
   {
     inputs: [
       {
diff --git a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts
deleted file mode 100644
index f3776e498fd..00000000000
--- a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts
+++ /dev/null
@@ -1,106 +0,0 @@
-export const ErrorTesterAbi = [
-  {
-    inputs: [
-      {
-        internalType: "string",
-        name: "message",
-        type: "string",
-      },
-    ],
-    name: "CustomError",
-    type: "error",
-  },
-  {
-    inputs: [
-      {
-        internalType: "bool",
-        name: "newState",
-        type: "bool",
-      },
-    ],
-    name: "setState",
-    outputs: [],
-    stateMutability: "nonpayable",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "state",
-    outputs: [
-      {
-        internalType: "bool",
-        name: "",
-        type: "bool",
-      },
-    ],
-    stateMutability: "view",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerAssertError",
-    outputs: [],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerCustomError",
-    outputs: [],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerDivisionByZero",
-    outputs: [
-      {
-        internalType: "uint256",
-        name: "",
-        type: "uint256",
-      },
-    ],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerOutOfBoundsError",
-    outputs: [
-      {
-        internalType: "uint256",
-        name: "",
-        type: "uint256",
-      },
-    ],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerRequireError",
-    outputs: [],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [],
-    name: "triggerRevertError",
-    outputs: [],
-    stateMutability: "pure",
-    type: "function",
-  },
-  {
-    inputs: [
-      {
-        internalType: "uint256",
-        name: "value",
-        type: "uint256",
-      },
-    ],
-    name: "valueMatch",
-    outputs: [],
-    stateMutability: "payable",
-    type: "function",
-  },
-] as const;
diff --git a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol b/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol
similarity index 98%
rename from substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol
rename to substrate/frame/revive/rpc/examples/js/contracts/Errors.sol
index f1fdd219624..abbdba8d32e 100644
--- a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol
+++ b/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: MIT
 pragma solidity ^0.8.0;
 
-contract ErrorTester {
+contract Errors {
 	bool public state;
 
 	// Payable function that can be used to test insufficient funds errors
diff --git a/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm
similarity index 100%
rename from substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm
rename to substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm
diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
index 37ebbc9ea3b..b9ee877927b 100644
--- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
@@ -1,7 +1,7 @@
 import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts'
 import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test'
 import { encodeFunctionData, Hex, parseEther } from 'viem'
-import { ErrorTesterAbi } from '../abi/ErrorTester'
+import { ErrorsAbi } from '../abi/Errors'
 import { FlipperCallerAbi } from '../abi/FlipperCaller'
 import { FlipperAbi } from '../abi/Flipper'
 
@@ -17,19 +17,19 @@ const envs = await Promise.all([createEnv('geth'), createEnv('kitchensink')])
 
 for (const env of envs) {
 	describe(env.serverWallet.chain.name, () => {
-		let errorTesterAddr: Hex = '0x'
+		let errorsAddr: Hex = '0x'
 		let flipperAddr: Hex = '0x'
 		let flipperCallerAddr: Hex = '0x'
 		beforeAll(async () => {
 			{
 				const hash = await env.serverWallet.deployContract({
-					abi: ErrorTesterAbi,
-					bytecode: getByteCode('errorTester', env.evm),
+					abi: ErrorsAbi,
+					bytecode: getByteCode('errors', env.evm),
 				})
 				const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash })
 				if (!deployReceipt.contractAddress)
 					throw new Error('Contract address should be set')
-				errorTesterAddr = deployReceipt.contractAddress
+				errorsAddr = deployReceipt.contractAddress
 			}
 
 			{
@@ -60,8 +60,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerAssertError',
 				})
 			} catch (err) {
@@ -78,8 +78,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerRevertError',
 				})
 			} catch (err) {
@@ -96,8 +96,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerDivisionByZero',
 				})
 			} catch (err) {
@@ -116,8 +116,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerOutOfBoundsError',
 				})
 			} catch (err) {
@@ -136,8 +136,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'triggerCustomError',
 				})
 			} catch (err) {
@@ -154,8 +154,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.simulateContract({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
@@ -187,8 +187,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.estimateContractGas({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
@@ -205,8 +205,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.accountWallet.estimateContractGas({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
@@ -223,8 +223,8 @@ for (const env of envs) {
 			expect.assertions(3)
 			try {
 				await env.serverWallet.estimateContractGas({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('11'),
 					args: [parseEther('10')],
@@ -255,8 +255,8 @@ for (const env of envs) {
 				expect(balance).toBe(0n)
 
 				await env.accountWallet.estimateContractGas({
-					address: errorTesterAddr,
-					abi: ErrorTesterAbi,
+					address: errorsAddr,
+					abi: ErrorsAbi,
 					functionName: 'setState',
 					args: [true],
 				})
@@ -273,7 +273,7 @@ for (const env of envs) {
 			expect(balance).toBe(0n)
 
 			const data = encodeFunctionData({
-				abi: ErrorTesterAbi,
+				abi: ErrorsAbi,
 				functionName: 'setState',
 				args: [true],
 			})
@@ -284,7 +284,7 @@ for (const env of envs) {
 					{
 						data,
 						from: env.accountWallet.account.address,
-						to: errorTesterAddr,
+						to: errorsAddr,
 					},
 				],
 			})
diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs
index 43b600c33d7..e64e16d45b2 100644
--- a/substrate/frame/revive/rpc/src/tests.rs
+++ b/substrate/frame/revive/rpc/src/tests.rs
@@ -222,7 +222,7 @@ async fn deploy_and_call() -> anyhow::Result<()> {
 async fn revert_call() -> anyhow::Result<()> {
 	let _lock = SHARED_RESOURCES.write();
 	let client = SharedResources::client().await;
-	let (bytecode, contract) = get_contract("ErrorTester")?;
+	let (bytecode, contract) = get_contract("Errors")?;
 	let receipt = TransactionBuilder::default()
 		.input(bytecode)
 		.send_and_wait_for_receipt(&client)
-- 
GitLab


From bdd11933dd9399f39d9eb74915117e6c94a905f1 Mon Sep 17 00:00:00 2001
From: 0xLucca <95830307+0xLucca@users.noreply.github.com>
Date: Thu, 2 Jan 2025 16:14:21 -0300
Subject: [PATCH 010/116] Remove warning log from frame-omni-bencher CLI
 (#7020)

# Description

This PR removes the outdated warning message from the
`frame-omni-bencher` CLI that states the tool is "not yet battle
tested". Fixes #7019

## Integration

No integration steps are required.

## Review Notes

The functionality of the tool remains unchanged. Removes the warning
message from the CLI output.

---------

Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Co-authored-by: command-bot <>
---
 prdoc/pr_7020.prdoc                            | 18 ++++++++++++++++++
 substrate/utils/frame/omni-bencher/src/main.rs |  2 --
 2 files changed, 18 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_7020.prdoc

diff --git a/prdoc/pr_7020.prdoc b/prdoc/pr_7020.prdoc
new file mode 100644
index 00000000000..5bbdb44c45a
--- /dev/null
+++ b/prdoc/pr_7020.prdoc
@@ -0,0 +1,18 @@
+title: Remove warning log from frame-omni-bencher CLI
+doc:
+- audience: Node Operator
+  description: |-
+    # Description
+
+    This PR removes the outdated warning message from the `frame-omni-bencher` CLI that states the tool is "not yet battle tested". Fixes #7019
+
+    ## Integration
+
+    No integration steps are required.
+
+    ## Review Notes
+
+    The functionality of the tool remains unchanged. Removes the warning message from the CLI output.
+crates:
+- name: frame-omni-bencher
+  bump: patch
diff --git a/substrate/utils/frame/omni-bencher/src/main.rs b/substrate/utils/frame/omni-bencher/src/main.rs
index 7d8aa891dc4..f0f9ab753b0 100644
--- a/substrate/utils/frame/omni-bencher/src/main.rs
+++ b/substrate/utils/frame/omni-bencher/src/main.rs
@@ -24,8 +24,6 @@ use tracing_subscriber::EnvFilter;
 fn main() -> Result<()> {
 	setup_logger();
 
-	log::warn!("The FRAME omni-bencher is not yet battle tested - double check the results.",);
-
 	command::Command::parse().run()
 }
 
-- 
GitLab


From 472945703925a1beb094439fd7e43149c44960d5 Mon Sep 17 00:00:00 2001
From: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Date: Fri, 3 Jan 2025 04:29:44 +0900
Subject: [PATCH 011/116] Fix polkadot sdk doc. (#7022)

If you see the doc
https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html
The runtime part introduction is missing.

Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
---
 docs/sdk/src/polkadot_sdk/frame_runtime.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs
index 8acf19f7641..24595e445fd 100644
--- a/docs/sdk/src/polkadot_sdk/frame_runtime.rs
+++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs
@@ -57,6 +57,7 @@
 //! The following example showcases a minimal pallet.
 #![doc = docify::embed!("src/polkadot_sdk/frame_runtime.rs", pallet)]
 //!
+//! ## Runtime
 //!
 //! A runtime is a collection of pallets that are amalgamated together. Each pallet typically has
 //! some configurations (exposed as a `trait Config`) that needs to be *specified* in the runtime.
-- 
GitLab


From b7e2695163e97fcacd8264a4291375ce66a95afc Mon Sep 17 00:00:00 2001
From: Xavier Lau <x@acg.box>
Date: Fri, 3 Jan 2025 05:18:18 +0800
Subject: [PATCH 012/116] Improve remote externalities logging (#7021)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Changes:
- Automatically detect if current env is tty. If not disable the spinner
logging.
- Add `Result` type.
- Format log style.

Originally reported from:
-
https://github.com/hack-ink/polkadot-runtime-releaser/blob/4811d2b419649a73edd5bd1f748a858b846eb139/action/try-runtime/action.yml#L75-L91
-
https://github.com/hack-ink/polkadot-runtime-releaser-workshop/pull/3#issuecomment-2563883943

Closes #7010.

---

Polkadot address: 156HGo9setPcU2qhFMVWLkcmtCEGySLwNqa3DaEiYSWtte4Y

---------

Signed-off-by: Xavier Lau <x@acg.box>
Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher <git@kchr.de>
Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
---
 prdoc/pr_7021.prdoc                           |   8 +
 .../frame/remote-externalities/src/lib.rs     | 219 +++++++++---------
 .../frame/remote-externalities/src/logging.rs |  86 +++++++
 3 files changed, 205 insertions(+), 108 deletions(-)
 create mode 100644 prdoc/pr_7021.prdoc
 create mode 100644 substrate/utils/frame/remote-externalities/src/logging.rs

diff --git a/prdoc/pr_7021.prdoc b/prdoc/pr_7021.prdoc
new file mode 100644
index 00000000000..5443579bbd9
--- /dev/null
+++ b/prdoc/pr_7021.prdoc
@@ -0,0 +1,8 @@
+title: Improve remote externalities logging
+doc:
+- audience: Node Dev
+  description: |-
+    Automatically detect if current env is tty. If not disable the spinner logging.
+crates:
+- name: frame-remote-externalities
+  bump: patch
diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs
index 75a2ac2aef4..4c49663260b 100644
--- a/substrate/utils/frame/remote-externalities/src/lib.rs
+++ b/substrate/utils/frame/remote-externalities/src/lib.rs
@@ -20,6 +20,8 @@
 //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate
 //! based chain, or a local state snapshot file.
 
+mod logging;
+
 use codec::{Compact, Decode, Encode};
 use indicatif::{ProgressBar, ProgressStyle};
 use jsonrpsee::{core::params::ArrayParams, http_client::HttpClient};
@@ -37,7 +39,6 @@ use sp_runtime::{
 	StateVersion,
 };
 use sp_state_machine::TestExternalities;
-use spinners::{Spinner, Spinners};
 use std::{
 	cmp::{max, min},
 	fs,
@@ -49,6 +50,8 @@ use std::{
 use substrate_rpc_client::{rpc_params, BatchRequestBuilder, ChainApi, ClientT, StateApi};
 use tokio_retry::{strategy::FixedInterval, Retry};
 
+type Result<T, E = &'static str> = std::result::Result<T, E>;
+
 type KeyValue = (StorageKey, StorageData);
 type TopKeyValues = Vec<KeyValue>;
 type ChildKeyValues = Vec<(ChildInfo, Vec<KeyValue>)>;
@@ -87,7 +90,7 @@ impl<B: BlockT> Snapshot<B> {
 		}
 	}
 
-	fn load(path: &PathBuf) -> Result<Snapshot<B>, &'static str> {
+	fn load(path: &PathBuf) -> Result<Snapshot<B>> {
 		let bytes = fs::read(path).map_err(|_| "fs::read failed.")?;
 		// The first item in the SCALE encoded struct bytes is the snapshot version. We decode and
 		// check that first, before proceeding to decode the rest of the snapshot.
@@ -168,9 +171,9 @@ impl Transport {
 	}
 
 	// Build an HttpClient from a URI.
-	async fn init(&mut self) -> Result<(), &'static str> {
+	async fn init(&mut self) -> Result<()> {
 		if let Self::Uri(uri) = self {
-			log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri);
+			debug!(target: LOG_TARGET, "initializing remote client to {uri:?}");
 
 			// If we have a ws uri, try to convert it to an http uri.
 			// We use an HTTP client rather than WS because WS starts to choke with "accumulated
@@ -178,11 +181,11 @@ impl Transport {
 			// from a node running a default configuration.
 			let uri = if uri.starts_with("ws://") {
 				let uri = uri.replace("ws://", "http://");
-				log::info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri);
+				info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {uri:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)");
 				uri
 			} else if uri.starts_with("wss://") {
 				let uri = uri.replace("wss://", "https://");
-				log::info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri);
+				info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {uri:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)");
 				uri
 			} else {
 				uri.clone()
@@ -193,7 +196,7 @@ impl Transport {
 				.request_timeout(std::time::Duration::from_secs(60 * 5))
 				.build(uri)
 				.map_err(|e| {
-					log::error!(target: LOG_TARGET, "error: {:?}", e);
+					error!(target: LOG_TARGET, "error: {e:?}");
 					"failed to build http client"
 				})?;
 
@@ -364,23 +367,23 @@ where
 		&self,
 		key: StorageKey,
 		maybe_at: Option<B::Hash>,
-	) -> Result<Option<StorageData>, &'static str> {
+	) -> Result<Option<StorageData>> {
 		trace!(target: LOG_TARGET, "rpc: get_storage");
 		self.as_online().rpc_client().storage(key, maybe_at).await.map_err(|e| {
-			error!(target: LOG_TARGET, "Error = {:?}", e);
+			error!(target: LOG_TARGET, "Error = {e:?}");
 			"rpc get_storage failed."
 		})
 	}
 
 	/// Get the latest finalized head.
-	async fn rpc_get_head(&self) -> Result<B::Hash, &'static str> {
+	async fn rpc_get_head(&self) -> Result<B::Hash> {
 		trace!(target: LOG_TARGET, "rpc: finalized_head");
 
 		// sadly this pretty much unreadable...
 		ChainApi::<(), _, B::Header, ()>::finalized_head(self.as_online().rpc_client())
 			.await
 			.map_err(|e| {
-				error!(target: LOG_TARGET, "Error = {:?}", e);
+				error!(target: LOG_TARGET, "Error = {e:?}");
 				"rpc finalized_head failed."
 			})
 	}
@@ -390,13 +393,13 @@ where
 		prefix: Option<StorageKey>,
 		start_key: Option<StorageKey>,
 		at: B::Hash,
-	) -> Result<Vec<StorageKey>, &'static str> {
+	) -> Result<Vec<StorageKey>> {
 		self.as_online()
 			.rpc_client()
 			.storage_keys_paged(prefix, Self::DEFAULT_KEY_DOWNLOAD_PAGE, start_key, Some(at))
 			.await
 			.map_err(|e| {
-				error!(target: LOG_TARGET, "Error = {:?}", e);
+				error!(target: LOG_TARGET, "Error = {e:?}");
 				"rpc get_keys failed"
 			})
 	}
@@ -407,7 +410,7 @@ where
 		prefix: &StorageKey,
 		block: B::Hash,
 		parallel: usize,
-	) -> Result<Vec<StorageKey>, &'static str> {
+	) -> Result<Vec<StorageKey>> {
 		/// Divide the workload and return the start key of each chunks. Guaranteed to return a
 		/// non-empty list.
 		fn gen_start_keys(prefix: &StorageKey) -> Vec<StorageKey> {
@@ -491,7 +494,7 @@ where
 		block: B::Hash,
 		start_key: Option<&StorageKey>,
 		end_key: Option<&StorageKey>,
-	) -> Result<Vec<StorageKey>, &'static str> {
+	) -> Result<Vec<StorageKey>> {
 		let mut last_key: Option<&StorageKey> = start_key;
 		let mut keys: Vec<StorageKey> = vec![];
 
@@ -518,11 +521,11 @@ where
 			// scraping out of range or no more matches,
 			// we are done either way
 			if page_len < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize {
-				log::debug!(target: LOG_TARGET, "last page received: {}", page_len);
+				debug!(target: LOG_TARGET, "last page received: {page_len}");
 				break
 			}
 
-			log::debug!(
+			debug!(
 				target: LOG_TARGET,
 				"new total = {}, full page received: {}",
 				keys.len(),
@@ -589,11 +592,10 @@ where
 		let total_payloads = payloads.len();
 
 		while start_index < total_payloads {
-			log::debug!(
+			debug!(
 				target: LOG_TARGET,
-				"Remaining payloads: {} Batch request size: {}",
+				"Remaining payloads: {} Batch request size: {batch_size}",
 				total_payloads - start_index,
-				batch_size,
 			);
 
 			let end_index = usize::min(start_index + batch_size, total_payloads);
@@ -620,18 +622,16 @@ where
 
 					retries += 1;
 					let failure_log = format!(
-						"Batch request failed ({}/{} retries). Error: {}",
-						retries,
-						Self::MAX_RETRIES,
-						e
+						"Batch request failed ({retries}/{} retries). Error: {e}",
+						Self::MAX_RETRIES
 					);
 					// after 2 subsequent failures something very wrong is happening. log a warning
 					// and reset the batch size down to 1.
 					if retries >= 2 {
-						log::warn!("{}", failure_log);
+						warn!("{failure_log}");
 						batch_size = 1;
 					} else {
-						log::debug!("{}", failure_log);
+						debug!("{failure_log}");
 						// Decrease batch size by DECREASE_FACTOR
 						batch_size =
 							(batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize;
@@ -655,13 +655,11 @@ where
 				)
 			};
 
-			log::debug!(
+			debug!(
 				target: LOG_TARGET,
-				"Request duration: {:?} Target duration: {:?} Last batch size: {} Next batch size: {}",
-				request_duration,
+				"Request duration: {request_duration:?} Target duration: {:?} Last batch size: {} Next batch size: {batch_size}",
 				Self::REQUEST_DURATION_TARGET,
 				end_index - start_index,
-				batch_size
 			);
 
 			let batch_response_len = batch_response.len();
@@ -689,21 +687,24 @@ where
 		prefix: StorageKey,
 		at: B::Hash,
 		pending_ext: &mut TestExternalities<HashingFor<B>>,
-	) -> Result<Vec<KeyValue>, &'static str> {
-		let start = Instant::now();
-		let mut sp = Spinner::with_timer(Spinners::Dots, "Scraping keys...".into());
-		// TODO We could start downloading when having collected the first batch of keys
-		// https://github.com/paritytech/polkadot-sdk/issues/2494
-		let keys = self
-			.rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS)
-			.await?
-			.into_iter()
-			.collect::<Vec<_>>();
-		sp.stop_with_message(format!(
-			"✅ Found {} keys ({:.2}s)",
-			keys.len(),
-			start.elapsed().as_secs_f32()
-		));
+	) -> Result<Vec<KeyValue>> {
+		let keys = logging::with_elapsed_async(
+			|| async {
+				// TODO: We could start downloading when having collected the first batch of keys.
+				// https://github.com/paritytech/polkadot-sdk/issues/2494
+				let keys = self
+					.rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS)
+					.await?
+					.into_iter()
+					.collect::<Vec<_>>();
+
+				Ok(keys)
+			},
+			"Scraping keys...",
+			|keys| format!("Found {} keys", keys.len()),
+		)
+		.await?;
+
 		if keys.is_empty() {
 			return Ok(Default::default())
 		}
@@ -735,7 +736,7 @@ where
 		let storage_data = match storage_data_result {
 			Ok(storage_data) => storage_data.into_iter().flatten().collect::<Vec<_>>(),
 			Err(e) => {
-				log::error!(target: LOG_TARGET, "Error while getting storage data: {}", e);
+				error!(target: LOG_TARGET, "Error while getting storage data: {e}");
 				return Err("Error while getting storage data")
 			},
 		};
@@ -751,27 +752,31 @@ where
 			.map(|(key, maybe_value)| match maybe_value {
 				Some(data) => (key.clone(), data),
 				None => {
-					log::warn!(target: LOG_TARGET, "key {:?} had none corresponding value.", &key);
+					warn!(target: LOG_TARGET, "key {key:?} had none corresponding value.");
 					let data = StorageData(vec![]);
 					(key.clone(), data)
 				},
 			})
 			.collect::<Vec<_>>();
 
-		let mut sp = Spinner::with_timer(Spinners::Dots, "Inserting keys into DB...".into());
-		let start = Instant::now();
-		pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| {
-			// Don't insert the child keys here, they need to be inserted separately with all their
-			// data in the load_child_remote function.
-			match is_default_child_storage_key(&k.0) {
-				true => None,
-				false => Some((k.0, v.0)),
-			}
-		}));
-		sp.stop_with_message(format!(
-			"✅ Inserted keys into DB ({:.2}s)",
-			start.elapsed().as_secs_f32()
-		));
+		logging::with_elapsed(
+			|| {
+				pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| {
+					// Don't insert the child keys here, they need to be inserted separately with
+					// all their data in the load_child_remote function.
+					match is_default_child_storage_key(&k.0) {
+						true => None,
+						false => Some((k.0, v.0)),
+					}
+				}));
+
+				Ok(())
+			},
+			"Inserting keys into DB...",
+			|_| "Inserted keys into DB".into(),
+		)
+		.expect("must succeed; qed");
+
 		Ok(key_values)
 	}
 
@@ -781,7 +786,7 @@ where
 		prefixed_top_key: &StorageKey,
 		child_keys: Vec<StorageKey>,
 		at: B::Hash,
-	) -> Result<Vec<KeyValue>, &'static str> {
+	) -> Result<Vec<KeyValue>> {
 		let child_keys_len = child_keys.len();
 
 		let payloads = child_keys
@@ -803,7 +808,7 @@ where
 			match Self::get_storage_data_dynamic_batch_size(client, payloads, &bar).await {
 				Ok(storage_data) => storage_data,
 				Err(e) => {
-					log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e);
+					error!(target: LOG_TARGET, "batch processing failed: {e:?}");
 					return Err("batch processing failed")
 				},
 			};
@@ -816,7 +821,7 @@ where
 			.map(|(key, maybe_value)| match maybe_value {
 				Some(v) => (key.clone(), v),
 				None => {
-					log::warn!(target: LOG_TARGET, "key {:?} had no corresponding value.", &key);
+					warn!(target: LOG_TARGET, "key {key:?} had no corresponding value.");
 					(key.clone(), StorageData(vec![]))
 				},
 			})
@@ -828,7 +833,7 @@ where
 		prefixed_top_key: &StorageKey,
 		child_prefix: StorageKey,
 		at: B::Hash,
-	) -> Result<Vec<StorageKey>, &'static str> {
+	) -> Result<Vec<StorageKey>> {
 		let retry_strategy =
 			FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES);
 		let mut all_child_keys = Vec::new();
@@ -850,7 +855,7 @@ where
 			let child_keys = Retry::spawn(retry_strategy.clone(), get_child_keys_closure)
 				.await
 				.map_err(|e| {
-					error!(target: LOG_TARGET, "Error = {:?}", e);
+					error!(target: LOG_TARGET, "Error = {e:?}");
 					"rpc child_get_keys failed."
 				})?;
 
@@ -896,7 +901,7 @@ where
 		&self,
 		top_kv: &[KeyValue],
 		pending_ext: &mut TestExternalities<HashingFor<B>>,
-	) -> Result<ChildKeyValues, &'static str> {
+	) -> Result<ChildKeyValues> {
 		let child_roots = top_kv
 			.iter()
 			.filter(|(k, _)| is_default_child_storage_key(k.as_ref()))
@@ -904,7 +909,7 @@ where
 			.collect::<Vec<_>>();
 
 		if child_roots.is_empty() {
-			info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape",);
+			info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape");
 			return Ok(Default::default())
 		}
 
@@ -930,7 +935,7 @@ where
 			let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) {
 				Some((ChildType::ParentKeyId, storage_key)) => storage_key,
 				None => {
-					log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key);
+					error!(target: LOG_TARGET, "invalid key: {prefixed_top_key:?}");
 					return Err("Invalid child key")
 				},
 			};
@@ -954,13 +959,13 @@ where
 	async fn load_top_remote(
 		&self,
 		pending_ext: &mut TestExternalities<HashingFor<B>>,
-	) -> Result<TopKeyValues, &'static str> {
+	) -> Result<TopKeyValues> {
 		let config = self.as_online();
 		let at = self
 			.as_online()
 			.at
 			.expect("online config must be initialized by this point; qed.");
-		log::info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {:?}", at);
+		info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {at:?}");
 
 		let mut keys_and_values = Vec::new();
 		for prefix in &config.hashed_prefixes {
@@ -968,7 +973,7 @@ where
 			let additional_key_values =
 				self.rpc_get_pairs(StorageKey(prefix.to_vec()), at, pending_ext).await?;
 			let elapsed = now.elapsed();
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"adding data for hashed prefix: {:?}, took {:.2}s",
 				HexDisplay::from(prefix),
@@ -979,7 +984,7 @@ where
 
 		for key in &config.hashed_keys {
 			let key = StorageKey(key.to_vec());
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"adding data for hashed key: {:?}",
 				HexDisplay::from(&key)
@@ -990,7 +995,7 @@ where
 					keys_and_values.push((key, value));
 				},
 				None => {
-					log::warn!(
+					warn!(
 						target: LOG_TARGET,
 						"no data found for hashed key: {:?}",
 						HexDisplay::from(&key)
@@ -1005,17 +1010,16 @@ where
 	/// The entry point of execution, if `mode` is online.
 	///
 	/// initializes the remote client in `transport`, and sets the `at` field, if not specified.
-	async fn init_remote_client(&mut self) -> Result<(), &'static str> {
+	async fn init_remote_client(&mut self) -> Result<()> {
 		// First, initialize the http client.
 		self.as_online_mut().transport.init().await?;
 
 		// Then, if `at` is not set, set it.
 		if self.as_online().at.is_none() {
 			let at = self.rpc_get_head().await?;
-			log::info!(
+			info!(
 				target: LOG_TARGET,
-				"since no at is provided, setting it to latest finalized head, {:?}",
-				at
+				"since no at is provided, setting it to latest finalized head, {at:?}",
 			);
 			self.as_online_mut().at = Some(at);
 		}
@@ -1040,7 +1044,7 @@ where
 			.filter(|p| *p != DEFAULT_CHILD_STORAGE_KEY_PREFIX)
 			.count() == 0
 		{
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"since no prefix is filtered, the data for all pallets will be downloaded"
 			);
@@ -1050,7 +1054,7 @@ where
 		Ok(())
 	}
 
-	async fn load_header(&self) -> Result<B::Header, &'static str> {
+	async fn load_header(&self) -> Result<B::Header> {
 		let retry_strategy =
 			FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES);
 		let get_header_closure = || {
@@ -1069,14 +1073,12 @@ where
 	/// `load_child_remote`.
 	///
 	/// Must be called after `init_remote_client`.
-	async fn load_remote_and_maybe_save(
-		&mut self,
-	) -> Result<TestExternalities<HashingFor<B>>, &'static str> {
+	async fn load_remote_and_maybe_save(&mut self) -> Result<TestExternalities<HashingFor<B>>> {
 		let state_version =
 			StateApi::<B::Hash>::runtime_version(self.as_online().rpc_client(), None)
 				.await
 				.map_err(|e| {
-					error!(target: LOG_TARGET, "Error = {:?}", e);
+					error!(target: LOG_TARGET, "Error = {e:?}");
 					"rpc runtime_version failed."
 				})
 				.map(|v| v.state_version())?;
@@ -1100,11 +1102,10 @@ where
 				self.load_header().await?,
 			);
 			let encoded = snapshot.encode();
-			log::info!(
+			info!(
 				target: LOG_TARGET,
-				"writing snapshot of {} bytes to {:?}",
+				"writing snapshot of {} bytes to {path:?}",
 				encoded.len(),
-				path
 			);
 			std::fs::write(path, encoded).map_err(|_| "fs::write failed")?;
 
@@ -1119,33 +1120,35 @@ where
 		Ok(pending_ext)
 	}
 
-	async fn do_load_remote(&mut self) -> Result<RemoteExternalities<B>, &'static str> {
+	async fn do_load_remote(&mut self) -> Result<RemoteExternalities<B>> {
 		self.init_remote_client().await?;
 		let inner_ext = self.load_remote_and_maybe_save().await?;
 		Ok(RemoteExternalities { header: self.load_header().await?, inner_ext })
 	}
 
-	fn do_load_offline(
-		&mut self,
-		config: OfflineConfig,
-	) -> Result<RemoteExternalities<B>, &'static str> {
-		let mut sp = Spinner::with_timer(Spinners::Dots, "Loading snapshot...".into());
-		let start = Instant::now();
-		info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path);
-		let Snapshot { snapshot_version: _, header, state_version, raw_storage, storage_root } =
-			Snapshot::<B>::load(&config.state_snapshot.path)?;
-
-		let inner_ext = TestExternalities::from_raw_snapshot(
-			raw_storage,
-			storage_root,
-			self.overwrite_state_version.unwrap_or(state_version),
-		);
-		sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32()));
+	fn do_load_offline(&mut self, config: OfflineConfig) -> Result<RemoteExternalities<B>> {
+		let (header, inner_ext) = logging::with_elapsed(
+			|| {
+				info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path);
+
+				let Snapshot { header, state_version, raw_storage, storage_root, .. } =
+					Snapshot::<B>::load(&config.state_snapshot.path)?;
+				let inner_ext = TestExternalities::from_raw_snapshot(
+					raw_storage,
+					storage_root,
+					self.overwrite_state_version.unwrap_or(state_version),
+				);
+
+				Ok((header, inner_ext))
+			},
+			"Loading snapshot...",
+			|_| "Loaded snapshot".into(),
+		)?;
 
 		Ok(RemoteExternalities { inner_ext, header })
 	}
 
-	pub(crate) async fn pre_build(mut self) -> Result<RemoteExternalities<B>, &'static str> {
+	pub(crate) async fn pre_build(mut self) -> Result<RemoteExternalities<B>> {
 		let mut ext = match self.mode.clone() {
 			Mode::Offline(config) => self.do_load_offline(config)?,
 			Mode::Online(_) => self.do_load_remote().await?,
@@ -1159,7 +1162,7 @@ where
 
 		// inject manual key values.
 		if !self.hashed_key_values.is_empty() {
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"extending externalities with {} manually injected key-values",
 				self.hashed_key_values.len()
@@ -1169,7 +1172,7 @@ where
 
 		// exclude manual key values.
 		if !self.hashed_blacklist.is_empty() {
-			log::info!(
+			info!(
 				target: LOG_TARGET,
 				"excluding externalities from {} keys",
 				self.hashed_blacklist.len()
@@ -1221,7 +1224,7 @@ where
 		self
 	}
 
-	pub async fn build(self) -> Result<RemoteExternalities<B>, &'static str> {
+	pub async fn build(self) -> Result<RemoteExternalities<B>> {
 		let mut ext = self.pre_build().await?;
 		ext.commit_all().unwrap();
 
diff --git a/substrate/utils/frame/remote-externalities/src/logging.rs b/substrate/utils/frame/remote-externalities/src/logging.rs
new file mode 100644
index 00000000000..7ab901c004d
--- /dev/null
+++ b/substrate/utils/frame/remote-externalities/src/logging.rs
@@ -0,0 +1,86 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::{
+	future::Future,
+	io::{self, IsTerminal},
+	time::Instant,
+};
+
+use spinners::{Spinner, Spinners};
+
+use super::Result;
+
+// A simple helper to time an operation with a nice spinner, start message, and end message.
+//
+// The spinner is only displayed when stdout is a terminal.
+pub(super) fn with_elapsed<F, R, EndMsg>(f: F, start_msg: &str, end_msg: EndMsg) -> Result<R>
+where
+	F: FnOnce() -> Result<R>,
+	EndMsg: FnOnce(&R) -> String,
+{
+	let timer = Instant::now();
+	let mut maybe_sp = start(start_msg);
+
+	Ok(end(f()?, timer, maybe_sp.as_mut(), end_msg))
+}
+
+// A simple helper to time an async operation with a nice spinner, start message, and end message.
+//
+// The spinner is only displayed when stdout is a terminal.
+pub(super) async fn with_elapsed_async<F, Fut, R, EndMsg>(
+	f: F,
+	start_msg: &str,
+	end_msg: EndMsg,
+) -> Result<R>
+where
+	F: FnOnce() -> Fut,
+	Fut: Future<Output = Result<R>>,
+	EndMsg: FnOnce(&R) -> String,
+{
+	let timer = Instant::now();
+	let mut maybe_sp = start(start_msg);
+
+	Ok(end(f().await?, timer, maybe_sp.as_mut(), end_msg))
+}
+
+fn start(start_msg: &str) -> Option<Spinner> {
+	let msg = format!("⏳ {start_msg}");
+
+	if io::stdout().is_terminal() {
+		Some(Spinner::new(Spinners::Dots, msg))
+	} else {
+		println!("{msg}");
+
+		None
+	}
+}
+
+fn end<T, EndMsg>(val: T, timer: Instant, maybe_sp: Option<&mut Spinner>, end_msg: EndMsg) -> T
+where
+	EndMsg: FnOnce(&T) -> String,
+{
+	let msg = format!("✅ {} in {:.2}s", end_msg(&val), timer.elapsed().as_secs_f32());
+
+	if let Some(sp) = maybe_sp {
+		sp.stop_with_message(msg);
+	} else {
+		println!("{msg}");
+	}
+
+	val
+}
-- 
GitLab


From f3ab3854e1df9e0498599f01ba4f9f152426432a Mon Sep 17 00:00:00 2001
From: Utkarsh Bhardwaj <ub2262000@gmail.com>
Date: Fri, 3 Jan 2025 10:39:39 +0000
Subject: [PATCH 013/116] migrate pallet-mixnet to umbrella crate (#6986)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Description

Migrate pallet-mixnet to use umbrella crate whilst adding a few types
and traits in the frame prelude that are used by other pallets as well.

## Review Notes

* This PR migrates `pallet-mixnet` to use the umbrella crate.
* Note that some imports like `use
sp_application_crypto::RuntimeAppPublic;` and imports from
`sp_mixnet::types::` have not been migrated to the umbrella crate as
they are not used in any / many other places and are relevant only to
the `pallet-mixnet`.
* Transaction related helpers to submit transactions from `frame-system`
have been added to the main `prelude` as they have usage across various
pallets.
```Rust
	pub use frame_system::offchain::*;
```
* Exporting `arithmetic` module in the main `prelude` since this is used
a lot throughout various pallets.
* Nightly formatting has been applied using `cargo fmt`
* Benchmarking dependencies have been removed from `pallet-mixnet` as
there is no benchmarking.rs present for `pallet-mixnet`. For the same
reason, `"pallet-mixnet?/runtime-benchmarks"` has been removed from
`umbrella/Cargo.toml`.

---------

Co-authored-by: Dónal Murray <donalm@seadanda.dev>
---
 Cargo.lock                        |  7 +---
 prdoc/pr_6986.prdoc               | 18 ++++++++++
 substrate/frame/mixnet/Cargo.toml | 24 ++-----------
 substrate/frame/mixnet/src/lib.rs | 60 ++++++++++++++-----------------
 substrate/frame/src/lib.rs        | 19 ++++++++--
 umbrella/Cargo.toml               |  1 -
 6 files changed, 64 insertions(+), 65 deletions(-)
 create mode 100644 prdoc/pr_6986.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 6151ed33c5b..3c55a14256c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14029,18 +14029,13 @@ dependencies = [
 name = "pallet-mixnet"
 version = "0.4.0"
 dependencies = [
- "frame-benchmarking 28.0.0",
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "log",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "scale-info",
  "serde",
  "sp-application-crypto 30.0.0",
- "sp-arithmetic 23.0.0",
- "sp-io 30.0.0",
  "sp-mixnet 0.4.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]
diff --git a/prdoc/pr_6986.prdoc b/prdoc/pr_6986.prdoc
new file mode 100644
index 00000000000..8deb6b04bd1
--- /dev/null
+++ b/prdoc/pr_6986.prdoc
@@ -0,0 +1,18 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[pallet-mixnet] Migrate to using frame umbrella crate'
+
+doc:
+  - audience: Runtime Dev
+    description: This PR migrates the pallet-mixnet to use the frame umbrella crate. This
+      is part of the ongoing effort to migrate all pallets to use the frame umbrella crate.
+      The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504).
+
+crates:
+  - name: pallet-mixnet
+    bump: minor
+  - name: polkadot-sdk-frame
+    bump: minor
+  - name: polkadot-sdk
+    bump: none
\ No newline at end of file
diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml
index bb5e8486456..0ae3b3938c6 100644
--- a/substrate/frame/mixnet/Cargo.toml
+++ b/substrate/frame/mixnet/Cargo.toml
@@ -17,42 +17,24 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive", "max-encoded-len"], workspace = true }
-frame-benchmarking = { optional = true, workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["experimental", "runtime"] }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 serde = { features = ["derive"], workspace = true }
 sp-application-crypto = { workspace = true }
-sp-arithmetic = { workspace = true }
-sp-io = { workspace = true }
 sp-mixnet = { workspace = true }
-sp-runtime = { workspace = true }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-benchmarking?/std",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"log/std",
 	"scale-info/std",
 	"serde/std",
 	"sp-application-crypto/std",
-	"sp-arithmetic/std",
-	"sp-io/std",
 	"sp-mixnet/std",
-	"sp-runtime/std",
-]
-runtime-benchmarks = [
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
-	"sp-runtime/try-runtime",
+	"frame/try-runtime",
 ]
diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs
index 6579ed678ae..98498181767 100644
--- a/substrate/frame/mixnet/src/lib.rs
+++ b/substrate/frame/mixnet/src/lib.rs
@@ -23,28 +23,23 @@
 
 extern crate alloc;
 
+pub use pallet::*;
+
 use alloc::vec::Vec;
-use codec::{Decode, Encode, MaxEncodedLen};
 use core::cmp::Ordering;
-use frame_support::{
-	traits::{EstimateNextSessionRotation, Get, OneSessionHandler},
-	BoundedVec,
+use frame::{
+	deps::{
+		sp_io::{self, MultiRemovalResults},
+		sp_runtime,
+	},
+	prelude::*,
 };
-use frame_system::{
-	offchain::{CreateInherent, SubmitTransaction},
-	pallet_prelude::BlockNumberFor,
-};
-pub use pallet::*;
-use scale_info::TypeInfo;
 use serde::{Deserialize, Serialize};
 use sp_application_crypto::RuntimeAppPublic;
-use sp_arithmetic::traits::{CheckedSub, Saturating, UniqueSaturatedInto, Zero};
-use sp_io::MultiRemovalResults;
 use sp_mixnet::types::{
 	AuthorityId, AuthoritySignature, KxPublic, Mixnode, MixnodesErr, PeerId, SessionIndex,
 	SessionPhase, SessionStatus, KX_PUBLIC_SIZE,
 };
-use sp_runtime::RuntimeDebug;
 
 const LOG_TARGET: &str = "runtime::mixnet";
 
@@ -168,12 +163,9 @@ fn twox<BlockNumber: UniqueSaturatedInto<u64>>(
 // The pallet
 ////////////////////////////////////////////////////////////////////////////////
 
-#[frame_support::pallet(dev_mode)]
+#[frame::pallet(dev_mode)]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
-	use frame_system::pallet_prelude::*;
-
 	#[pallet::pallet]
 	pub struct Pallet<T>(_);
 
@@ -254,7 +246,7 @@ pub mod pallet {
 		StorageDoubleMap<_, Identity, SessionIndex, Identity, AuthorityIndex, BoundedMixnodeFor<T>>;
 
 	#[pallet::genesis_config]
-	#[derive(frame_support::DefaultNoBound)]
+	#[derive(DefaultNoBound)]
 	pub struct GenesisConfig<T: Config> {
 		/// The mixnode set for the very first session.
 		pub mixnodes: BoundedVec<BoundedMixnodeFor<T>, T::MaxAuthorities>,
@@ -308,7 +300,7 @@ pub mod pallet {
 
 		fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
 			let Self::Call::register { registration, signature } = call else {
-				return InvalidTransaction::Call.into()
+				return InvalidTransaction::Call.into();
 			};
 
 			// Check session index matches
@@ -320,16 +312,16 @@ pub mod pallet {
 
 			// Check authority index is valid
 			if registration.authority_index >= T::MaxAuthorities::get() {
-				return InvalidTransaction::BadProof.into()
+				return InvalidTransaction::BadProof.into();
 			}
 			let Some(authority_id) = NextAuthorityIds::<T>::get(registration.authority_index)
 			else {
-				return InvalidTransaction::BadProof.into()
+				return InvalidTransaction::BadProof.into();
 			};
 
 			// Check the authority hasn't registered a mixnode yet
 			if Self::already_registered(registration.session_index, registration.authority_index) {
-				return InvalidTransaction::Stale.into()
+				return InvalidTransaction::Stale.into();
 			}
 
 			// Check signature. Note that we don't use regular signed transactions for registration
@@ -339,7 +331,7 @@ pub mod pallet {
 				authority_id.verify(&encoded_registration, signature)
 			});
 			if !signature_ok {
-				return InvalidTransaction::BadProof.into()
+				return InvalidTransaction::BadProof.into();
 			}
 
 			ValidTransaction::with_tag_prefix("MixnetRegistration")
@@ -368,12 +360,12 @@ impl<T: Config> Pallet<T> {
 			.saturating_sub(CurrentSessionStartBlock::<T>::get());
 		let Some(block_in_phase) = block_in_phase.checked_sub(&T::NumCoverToCurrentBlocks::get())
 		else {
-			return SessionPhase::CoverToCurrent
+			return SessionPhase::CoverToCurrent;
 		};
 		let Some(block_in_phase) =
 			block_in_phase.checked_sub(&T::NumRequestsToCurrentBlocks::get())
 		else {
-			return SessionPhase::RequestsToCurrent
+			return SessionPhase::RequestsToCurrent;
 		};
 		if block_in_phase < T::NumCoverToPrevBlocks::get() {
 			SessionPhase::CoverToPrev
@@ -411,7 +403,7 @@ impl<T: Config> Pallet<T> {
 			return Err(MixnodesErr::InsufficientRegistrations {
 				num: 0,
 				min: T::MinMixnodes::get(),
-			})
+			});
 		};
 		Self::mixnodes(prev_session_index)
 	}
@@ -430,7 +422,7 @@ impl<T: Config> Pallet<T> {
 		// registering
 		let block_in_session = block_number.saturating_sub(CurrentSessionStartBlock::<T>::get());
 		if block_in_session < T::NumRegisterStartSlackBlocks::get() {
-			return false
+			return false;
 		}
 
 		let (Some(end_block), _weight) =
@@ -438,7 +430,7 @@ impl<T: Config> Pallet<T> {
 		else {
 			// Things aren't going to work terribly well in this case as all the authorities will
 			// just pile in after the slack period...
-			return true
+			return true;
 		};
 
 		let remaining_blocks = end_block
@@ -447,7 +439,7 @@ impl<T: Config> Pallet<T> {
 		if remaining_blocks.is_zero() {
 			// Into the slack time at the end of the session. Not necessarily too late;
 			// registrations are accepted right up until the session ends.
-			return true
+			return true;
 		}
 
 		// Want uniform distribution over the remaining blocks, so pick this block with probability
@@ -496,7 +488,7 @@ impl<T: Config> Pallet<T> {
 				"Session {session_index} registration attempted, \
 				but current session is {current_session_index}",
 			);
-			return false
+			return false;
 		}
 
 		let block_number = frame_system::Pallet::<T>::block_number();
@@ -505,7 +497,7 @@ impl<T: Config> Pallet<T> {
 				target: LOG_TARGET,
 				"Waiting for the session to progress further before registering",
 			);
-			return false
+			return false;
 		}
 
 		let Some((authority_index, authority_id)) = Self::next_local_authority() else {
@@ -513,7 +505,7 @@ impl<T: Config> Pallet<T> {
 				target: LOG_TARGET,
 				"Not an authority in the next session; cannot register a mixnode",
 			);
-			return false
+			return false;
 		};
 
 		if Self::already_registered(session_index, authority_index) {
@@ -521,14 +513,14 @@ impl<T: Config> Pallet<T> {
 				target: LOG_TARGET,
 				"Already registered a mixnode for the next session",
 			);
-			return false
+			return false;
 		}
 
 		let registration =
 			Registration { block_number, session_index, authority_index, mixnode: mixnode.into() };
 		let Some(signature) = authority_id.sign(&registration.encode()) else {
 			log::debug!(target: LOG_TARGET, "Failed to sign registration");
-			return false
+			return false;
 		};
 		let call = Call::register { registration, signature };
 		let xt = T::create_inherent(call.into());
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index 8031ddf96e6..b3e340cbcbf 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -203,12 +203,18 @@ pub mod prelude {
 	/// Dispatch types from `frame-support`, other fundamental traits
 	#[doc(no_inline)]
 	pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo};
-	pub use frame_support::traits::{Contains, IsSubType, OnRuntimeUpgrade};
+	pub use frame_support::traits::{
+		Contains, EstimateNextSessionRotation, IsSubType, OnRuntimeUpgrade, OneSessionHandler,
+	};
 
 	/// Pallet prelude of `frame-system`.
 	#[doc(no_inline)]
 	pub use frame_system::pallet_prelude::*;
 
+	/// Transaction related helpers to submit transactions.
+	#[doc(no_inline)]
+	pub use frame_system::offchain::*;
+
 	/// All FRAME-relevant derive macros.
 	#[doc(no_inline)]
 	pub use super::derive::*;
@@ -216,6 +222,9 @@ pub mod prelude {
 	/// All hashing related things
 	pub use super::hashing::*;
 
+	/// All arithmetic types and traits used for safe math.
+	pub use super::arithmetic::*;
+
 	/// Runtime traits
 	#[doc(no_inline)]
 	pub use sp_runtime::traits::{
@@ -223,9 +232,11 @@ pub mod prelude {
 		Saturating, StaticLookup, TrailingZeroInput,
 	};
 
-	/// Other error/result types for runtime
+	/// Other runtime types and traits
 	#[doc(no_inline)]
-	pub use sp_runtime::{DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError};
+	pub use sp_runtime::{
+		BoundToRuntimeAppPublic, DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError,
+	};
 }
 
 #[cfg(any(feature = "try-runtime", test))]
@@ -509,6 +520,8 @@ pub mod traits {
 }
 
 /// The arithmetic types used for safe math.
+///
+/// This is already part of the [`prelude`].
 pub mod arithmetic {
 	pub use sp_arithmetic::{traits::*, *};
 }
diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml
index f36d39d63f6..d2a47ade7f8 100644
--- a/umbrella/Cargo.toml
+++ b/umbrella/Cargo.toml
@@ -290,7 +290,6 @@ runtime-benchmarks = [
 	"pallet-membership?/runtime-benchmarks",
 	"pallet-message-queue?/runtime-benchmarks",
 	"pallet-migrations?/runtime-benchmarks",
-	"pallet-mixnet?/runtime-benchmarks",
 	"pallet-mmr?/runtime-benchmarks",
 	"pallet-multisig?/runtime-benchmarks",
 	"pallet-nft-fractionalization?/runtime-benchmarks",
-- 
GitLab


From 659f4848a7564c45d8d3a3d13c7596801050da82 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Date: Fri, 3 Jan 2025 13:29:29 +0100
Subject: [PATCH 014/116] [docs] Fix release naming (#7032)

- **[docs] Fix release naming**
- **Remove outdated and unmaintained file**

Closes https://github.com/paritytech/polkadot-sdk/issues/6998

---------

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
---
 README.md               |   4 +-
 cumulus/docs/release.md | 135 ----------------------------------------
 docs/RELEASE.md         |   6 +-
 3 files changed, 7 insertions(+), 138 deletions(-)
 delete mode 100644 cumulus/docs/release.md

diff --git a/README.md b/README.md
index 6c0dfbb2e7e..24352cc28a1 100644
--- a/README.md
+++ b/README.md
@@ -40,9 +40,9 @@ curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytec
 <!-- markdownlint-disable-next-line MD013 -->
 ![Current Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-latest.svg)&nbsp;&nbsp;![Next Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-next.svg)
 
-The Polkadot SDK is released every three months as a `stableYYMMDD` release. They are supported for
+The Polkadot SDK is released every three months as a `stableYYMM` release. They are supported for
 one year with patches. See the next upcoming versions in the [Release
-Registry](https://github.com/paritytech/release-registry/).
+Registry](https://github.com/paritytech/release-registry/) and more docs in [RELEASE.md](./docs/RELEASE.md).
 
 You can use [`psvm`](https://github.com/paritytech/psvm) to update all dependencies to a specific
 version without needing to manually select the correct version for each crate.
diff --git a/cumulus/docs/release.md b/cumulus/docs/release.md
deleted file mode 100644
index 8302b7b9b7f..00000000000
--- a/cumulus/docs/release.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# Releases
-
-## Versioning
-
-### Example #1
-
-```
-| Polkadot   | v  0. 9.22    |
-| Client     | v  0. 9.22 0  |
-| Runtime    | v     9 22 0  |  =>  9220
-| semver     |    0. 9.22 0  |
-```
-
-### Example #2
-
-```
-| Polkadot   | v  0.10.42    |
-| Client     | v  0.10.42 0  |
-| Runtime    | v    10.42 0  |  => 10420
-| semver     |    0.10.42 0  |
-```
-
-### Example #3
-
-```
-| Polkadot   | v  1. 2.18    |
-| Client     | v  1. 2.18 0  |
-| Runtime    | v  1  2 18 0  |  => 102180
-| semver     |    1. 2.18 0  |
-```
-
-
-This document contains information related to the releasing process and describes a few of the steps and checks that are
-performed during the release process.
-
-## Client
-
-### <a name="burnin"></a>Burn In
-
-Ensure that Parity DevOps has run the new release on Westend and Kusama Asset Hub collators for 12h prior to publishing
-the release.
-
-### Build Artifacts
-
-Add any necessary assets to the release. They should include:
-
-- Linux binaries
-    - GPG signature
-    - SHA256 checksum
-- WASM binaries of the runtimes
-- Source code
-
-
-## Runtimes
-
-### Spec Version
-
-A new runtime release must bump the `spec_version`. This may follow a pattern with the client release (e.g. runtime
-v9220 corresponds to v0.9.22).
-
-### Runtime version bump between RCs
-
-The clients need to be aware of runtime changes. However, we do not want to bump the `spec_version` for every single
-release candidate. Instead, we can bump the `impl` field of the version to signal the change to the client. This applies
-only to runtimes that have been deployed.
-
-### Old Migrations Removed
-
-Previous `on_runtime_upgrade` functions from old upgrades should be removed.
-
-### New Migrations
-
-Ensure that any migrations that are required due to storage or logic changes are included in the `on_runtime_upgrade`
-function of the appropriate pallets.
-
-### Extrinsic Ordering & Storage
-
-Offline signing libraries depend on a consistent ordering of call indices and functions. Compare the metadata of the
-current and new runtimes and ensure that the `module index, call index` tuples map to the same set of functions. It also
-checks if there have been any changes in `storage`. In case of a breaking change, increase `transaction_version`.
-
-To verify the order has not changed, manually start the following
-[Github Action](https://github.com/paritytech/polkadot-sdk/cumulus/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml).
-It takes around a minute to run and will produce the report as artifact you need to manually check.
-
-To run it, in the _Run Workflow_ dropdown:
-1. **Use workflow from**: to ignore, leave `master` as default
-2. **The WebSocket url of the reference node**: - Asset Hub Polkadot: `wss://statemint-rpc.polkadot.io`
-    - Asset Hub Kusama: `wss://statemine-rpc.polkadot.io`
-    - Asset Hub Westend: `wss://westmint-rpc.polkadot.io`
-3. **A url to a Linux binary for the node containing the runtime to test**: Paste the URL of the latest
-   release-candidate binary from the draft-release on Github. The binary has to previously be uploaded to S3 (Github url
-   link to the binary is constantly changing)
-    - E.g: https://releases.parity.io/cumulus/v0.9.270-rc3/polkadot-parachain
-4. **The name of the chain under test. Usually, you would pass a local chain**: - Asset Hub Polkadot:
-	`asset-hub-polkadot-local`
-    - Asset Hub Kusama: `asset-hub-kusama-local`
-    - Asset Hub Westend: `asset-hub-westend-local`
-5. Click **Run workflow**
-
-When the workflow is done, click on it and download the zip artifact, inside you'll find an `output.txt` file. The
-things to look for in the output are lines like:
-
-- `[Identity] idx 28 -> 25 (calls 15)` - indicates the index for Identity has changed
-- `[+] Society, Recovery` - indicates the new version includes 2 additional modules/pallets.
-- If no indices have changed, every modules line should look something like `[Identity] idx 25 (calls 15)`
-
-**Note**: Adding new functions to the runtime does not constitute a breaking change as long as the indexes did not
-change.
-
-**Note**: Extrinsic function signatures changes (adding/removing & ordering arguments) are not caught by the job, so
-those changes should be reviewed "manually"
-
-### Benchmarks
-
-The Benchmarks can now be started from the CI. First find the CI pipeline from
-[here](https://gitlab.parity.io/parity/mirrors/cumulus/-/pipelines?page=1&scope=all&ref=release-parachains-v9220) and
-pick the latest. [Guide](https://github.com/paritytech/ci_cd/wiki/Benchmarks:-cumulus)
-
-### Integration Tests
-
-Until https://github.com/paritytech/ci_cd/issues/499 is done, tests will have to be run manually.
-1. Go to https://github.com/paritytech/parachains-integration-tests and check out the release branch. E.g.
-https://github.com/paritytech/parachains-integration-tests/tree/release-v9270-v0.9.27 for `release-parachains-v0.9.270`
-2. Clone `release-parachains-<version>` branch from Cumulus
-3. `cargo build --release`
-4. Copy `./target/polkadot-parachain` to `./bin`
-5. Clone `it/release-<version>-fast-sudo` from Polkadot In case the branch does not exists (it is a manual process):
-	cherry pick `paritytech/polkadot@791c8b8` and run:
-	`find . -type f -name "*.toml" -print0 | xargs -0 sed -i '' -e 's/polkadot-vX.X.X/polkadot-v<version>/g'`
-6. `cargo build --release --features fast-runtime`
-7. Copy `./target/polkadot` into `./bin` (in Cumulus)
-8. Run the tests:
-   - Asset Hub Polkadot: `yarn zombienet-test -c ./examples/statemint/config.toml -t ./examples/statemint`
-   - Asset Hub Kusama: `yarn zombienet-test -c ./examples/statemine/config.toml -t ./examples/statemine`
diff --git a/docs/RELEASE.md b/docs/RELEASE.md
index bea36741135..677cb5465b6 100644
--- a/docs/RELEASE.md
+++ b/docs/RELEASE.md
@@ -14,7 +14,11 @@ Merging to it is restricted to [Backports](#backports).
 
 We are releasing multiple different things from this repository in one release, but we don't want to use the same
 version for everything. Thus, in the following we explain the versioning story for the crates, node and Westend &
-Rococo. To easily refer to a release, it shall be named by its date in the form `stableYYMMDD`.
+Rococo.
+
+To easily refer to a release, it shall be named by its date in the form `stableYYMM`. Patches to stable releases are
+tagged in the form of `stableYYMM-PATCH`, with `PATCH` ranging from 1 to 99. For example, the fourth patch to
+`stable2409` would be `stable2409-4`.
 
 ## Crate
 
-- 
GitLab


From 721f6d97613b0ece9c8414e8ec8ba31d2f67d40c Mon Sep 17 00:00:00 2001
From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com>
Date: Fri, 3 Jan 2025 14:19:18 +0100
Subject: [PATCH 015/116] [WIP] Fix networking-benchmarks (#7036)

cc https://github.com/paritytech/ci_cd/issues/1094
---
 ...nchmarks.yml => benchmarks-networking.yml} | 20 ++++++++++---------
 ...enchmarks.yml => benchmarks-subsystem.yml} |  0
 2 files changed, 11 insertions(+), 9 deletions(-)
 rename .github/workflows/{networking-benchmarks.yml => benchmarks-networking.yml} (86%)
 rename .github/workflows/{subsystem-benchmarks.yml => benchmarks-subsystem.yml} (100%)

diff --git a/.github/workflows/networking-benchmarks.yml b/.github/workflows/benchmarks-networking.yml
similarity index 86%
rename from .github/workflows/networking-benchmarks.yml
rename to .github/workflows/benchmarks-networking.yml
index e45ae601105..79494b9a015 100644
--- a/.github/workflows/networking-benchmarks.yml
+++ b/.github/workflows/benchmarks-networking.yml
@@ -17,7 +17,7 @@ jobs:
     uses: ./.github/workflows/reusable-preflight.yml
 
   build:
-    timeout-minutes: 80
+    timeout-minutes: 50
     needs: [preflight]
     runs-on: ${{ needs.preflight.outputs.RUNNER_BENCHMARK }}
     container:
@@ -27,12 +27,8 @@ jobs:
       matrix:
         features:
           [
-            {
-              bench: "notifications_protocol",
-            },
-            {
-              bench: "request_response_protocol",
-            },
+            { bench: "notifications_protocol" },
+            { bench: "request_response_protocol" },
           ]
     steps:
       - name: Checkout
@@ -42,7 +38,7 @@ jobs:
         id: run-benchmarks
         run: |
           mkdir -p ./charts
-          forklift cargo bench -p sc-network --bench ${{ matrix.features.bench }} -- --output-format bencher | grep "^test" | tee ./charts/networking-bench.txt || echo "Benchmarks failed"
+          forklift cargo bench -p sc-network --bench ${{ matrix.features.bench }} -- --output-format bencher | grep "^test" | tee ./charts/${{ matrix.features.bench }}.txt || echo "Benchmarks failed"
           ls -lsa ./charts
 
       - name: Upload artifacts
@@ -69,7 +65,13 @@ jobs:
       - name: Download artifacts
         uses: actions/download-artifact@v4.1.8
         with:
-          name: networking-bench-${{ github.sha }}
+          name: notifications_protocol-${{ github.sha }}
+          path: ./charts
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v4.1.8
+        with:
+          name: request_response_protocol-${{ github.sha }}
           path: ./charts
 
       - name: Setup git
diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/benchmarks-subsystem.yml
similarity index 100%
rename from .github/workflows/subsystem-benchmarks.yml
rename to .github/workflows/benchmarks-subsystem.yml
-- 
GitLab


From 0b4f131b000e01f1aca3f023937a36dcc281d5e2 Mon Sep 17 00:00:00 2001
From: Qiwei Yang <yangqiwei97@gmail.com>
Date: Sat, 4 Jan 2025 06:22:12 +0800
Subject: [PATCH 016/116] Replace duplicated whitelist with
 whitelisted_storage_keys (#7024)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

related issue: #7018

replaced duplicated whitelists with
`AllPalletsWithSystem::whitelisted_storage_keys();` in this PR

---------

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 .../runtimes/assets/asset-hub-rococo/src/lib.rs  | 16 ++--------------
 .../runtimes/assets/asset-hub-westend/src/lib.rs | 16 ++--------------
 .../bridge-hubs/bridge-hub-rococo/src/lib.rs     | 14 ++------------
 .../bridge-hubs/bridge-hub-westend/src/lib.rs    | 14 ++------------
 .../collectives/collectives-westend/src/lib.rs   | 14 ++------------
 .../contracts/contracts-rococo/src/lib.rs        | 14 ++------------
 .../runtimes/coretime/coretime-rococo/src/lib.rs | 14 ++------------
 .../coretime/coretime-westend/src/lib.rs         | 14 ++------------
 .../runtimes/people/people-rococo/src/lib.rs     | 14 ++------------
 .../runtimes/people/people-westend/src/lib.rs    | 14 ++------------
 .../runtimes/testing/penpal/src/lib.rs           | 14 ++------------
 11 files changed, 22 insertions(+), 136 deletions(-)

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index dd153582615..8f4ae4670ac 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -1854,20 +1854,8 @@ impl_runtime_apis! {
 
 			type ToWestend = XcmBridgeHubRouterBench<Runtime, ToWestendXcmRouterInstance>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-				//TODO: use from relay_well_known_keys::ACTIVE_CONFIG
-				hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 707d1c52f74..26ef3219a1e 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -2030,20 +2030,8 @@ impl_runtime_apis! {
 
 			type ToRococo = XcmBridgeHubRouterBench<Runtime, ToRococoXcmRouterInstance>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-				//TODO: use from relay_well_known_keys::ACTIVE_CONFIG
-				hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 492b731610c..88146cecb9e 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -1498,18 +1498,8 @@ impl_runtime_apis! {
 				}
 			}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index edf79ea0c31..1ca709f0d8c 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -1315,18 +1315,8 @@ impl_runtime_apis! {
 				}
 			}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index 5c2ba2e24c2..d3cd285ba67 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -1139,18 +1139,8 @@ impl_runtime_apis! {
 				}
 			}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index 594c9b26f57..be369565dba 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -849,18 +849,8 @@ impl_runtime_apis! {
 				}
 			}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index e8f6e6659e1..c4d43e4361f 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -1140,18 +1140,8 @@ impl_runtime_apis! {
 			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
 			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index ce965f0ad1b..431bfc8a63b 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -1135,18 +1135,8 @@ impl_runtime_apis! {
 			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
 			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
index b8db687da62..ef3c90ace82 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
@@ -1055,18 +1055,8 @@ impl_runtime_apis! {
 			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
 			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
index 620ec41c071..ebf8fcb33bd 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
@@ -1055,18 +1055,8 @@ impl_runtime_apis! {
 			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
 			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
index b51670c792d..51dc95bf2c7 100644
--- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
@@ -1132,18 +1132,8 @@ impl_runtime_apis! {
 			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
 			impl cumulus_pallet_session_benchmarking::Config for Runtime {}
 
-			let whitelist: Vec<TrackedStorageKey> = vec![
-				// Block Number
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
-				// Total Issuance
-				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
-				// Execution Phase
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
-				// Event Count
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
-				// System Events
-				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
-			];
+			use frame_support::traits::WhitelistedStorageKeys;
+			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
 			let mut batches = Vec::<BenchmarkBatch>::new();
 			let params = (&config, &whitelist);
-- 
GitLab


From b5a5ac4487890046d226bedb0238eaccb423ae42 Mon Sep 17 00:00:00 2001
From: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Date: Sat, 4 Jan 2025 11:03:30 +0900
Subject: [PATCH 017/116] Make `TransactionExtension` tuple of tuple
 transparent for implication (#7028)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently `(A, B, C)` and `((A, B), C)` change the order of implications
in the transaction extension pipeline. This order is not accessible in
the metadata, because the metadata is just a vector of transaction
extension, the nested structure is not visible.

This PR makes the implementation of `TransactionExtension` for tuples
better for tuples of tuples. `(A, B, C)` and `((A, B), C)` don't change
the implication for the validation of A.

This is a breaking change, but only when using the trait
`TransactionExtension`; code implementing the trait does not break
(surprising Rust behavior, but fine).

---------

Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 prdoc/pr_7028.prdoc                           |  25 ++
 .../src/extensions/check_non_zero_sender.rs   |   4 +-
 .../system/src/extensions/check_nonce.rs      |   6 +-
 .../skip-feeless-payment/src/lib.rs           |   5 +-
 .../primitives/runtime/src/traits/mod.rs      |   3 +-
 .../dispatch_transaction.rs                   |   2 +-
 .../src/traits/transaction_extension/mod.rs   | 258 +++++++++++++++++-
 7 files changed, 279 insertions(+), 24 deletions(-)
 create mode 100644 prdoc/pr_7028.prdoc

diff --git a/prdoc/pr_7028.prdoc b/prdoc/pr_7028.prdoc
new file mode 100644
index 00000000000..ead918fc2e0
--- /dev/null
+++ b/prdoc/pr_7028.prdoc
@@ -0,0 +1,25 @@
+title: 'Fix implication order in implementation of `TransactionExtension` for tuple'
+doc:
+- audience:
+  - Runtime Dev
+  - Runtime User
+  description: |-
+    Before this PR, the implications were different in the pipeline `(A, B, C)` and `((A, B), C)`.
+    This PR fixes this behavior and makes nested tuples transparent: the implication order of a
+    tuple of tuples is now the same as in a single flat tuple.
+
+    For runtime users this means that the implication can be breaking depending on the pipeline used
+    in the runtime.
+
+    For runtime developers this breaks usage of `TransactionExtension::validate`.
+    When calling `TransactionExtension::validate` the implication must now implement `Implication`
+    trait, you can use `TxBaseImplication` to wrap the type and use it as the base implication.
+    E.g. instead of `&(extension_version, call),` you can write `&TxBaseImplication((extension_version, call))`.
+
+crates:
+- name: sp-runtime
+  bump: major
+- name: pallet-skip-feeless-payment
+  bump: major
+- name: frame-system
+  bump: major
diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs
index 577e2b324fc..978eebaf3da 100644
--- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs
+++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs
@@ -86,7 +86,7 @@ mod tests {
 	use crate::mock::{new_test_ext, Test, CALL};
 	use frame_support::{assert_ok, dispatch::DispatchInfo};
 	use sp_runtime::{
-		traits::{AsTransactionAuthorizedOrigin, DispatchTransaction},
+		traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, TxBaseImplication},
 		transaction_validity::{TransactionSource::External, TransactionValidityError},
 	};
 
@@ -118,7 +118,7 @@ mod tests {
 			let info = DispatchInfo::default();
 			let len = 0_usize;
 			let (_, _, origin) = CheckNonZeroSender::<Test>::new()
-				.validate(None.into(), CALL, &info, len, (), CALL, External)
+				.validate(None.into(), CALL, &info, len, (), &TxBaseImplication(CALL), External)
 				.unwrap();
 			assert!(!origin.is_transaction_authorized());
 		})
diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs
index 004ec08a26f..bc19a09e06a 100644
--- a/substrate/frame/system/src/extensions/check_nonce.rs
+++ b/substrate/frame/system/src/extensions/check_nonce.rs
@@ -186,7 +186,7 @@ mod tests {
 		assert_ok, assert_storage_noop, dispatch::GetDispatchInfo, traits::OriginTrait,
 	};
 	use sp_runtime::{
-		traits::{AsTransactionAuthorizedOrigin, DispatchTransaction},
+		traits::{AsTransactionAuthorizedOrigin, DispatchTransaction, TxBaseImplication},
 		transaction_validity::TransactionSource::External,
 	};
 
@@ -335,7 +335,7 @@ mod tests {
 			let info = DispatchInfo::default();
 			let len = 0_usize;
 			let (_, val, origin) = CheckNonce::<Test>(1u64.into())
-				.validate(None.into(), CALL, &info, len, (), CALL, External)
+				.validate(None.into(), CALL, &info, len, (), &TxBaseImplication(CALL), External)
 				.unwrap();
 			assert!(!origin.is_transaction_authorized());
 			assert_ok!(CheckNonce::<Test>(1u64.into()).prepare(val, &origin, CALL, &info, len));
@@ -359,7 +359,7 @@ mod tests {
 			let len = 0_usize;
 			// run the validation step
 			let (_, val, origin) = CheckNonce::<Test>(1u64.into())
-				.validate(Some(1).into(), CALL, &info, len, (), CALL, External)
+				.validate(Some(1).into(), CALL, &info, len, (), &TxBaseImplication(CALL), External)
 				.unwrap();
 			// mutate `AccountData` for the caller
 			crate::Account::<Test>::mutate(1, |info| {
diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
index dd907f6fcbb..5ba1d129767 100644
--- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
+++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs
@@ -46,7 +46,8 @@ use frame_support::{
 use scale_info::{StaticTypeInfo, TypeInfo};
 use sp_runtime::{
 	traits::{
-		DispatchInfoOf, DispatchOriginOf, PostDispatchInfoOf, TransactionExtension, ValidateResult,
+		DispatchInfoOf, DispatchOriginOf, Implication, PostDispatchInfoOf, TransactionExtension,
+		ValidateResult,
 	},
 	transaction_validity::TransactionValidityError,
 };
@@ -147,7 +148,7 @@ where
 		info: &DispatchInfoOf<T::RuntimeCall>,
 		len: usize,
 		self_implicit: S::Implicit,
-		inherited_implication: &impl Encode,
+		inherited_implication: &impl Implication,
 		source: TransactionSource,
 	) -> ValidateResult<Self::Val, T::RuntimeCall> {
 		if call.is_feeless(&origin) {
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index cfcc3e5a354..d371152dc40 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -55,7 +55,8 @@ use std::str::FromStr;
 
 pub mod transaction_extension;
 pub use transaction_extension::{
-	DispatchTransaction, TransactionExtension, TransactionExtensionMetadata, ValidateResult,
+	DispatchTransaction, Implication, ImplicationParts, TransactionExtension,
+	TransactionExtensionMetadata, TxBaseImplication, ValidateResult,
 };
 
 /// A lazy value.
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
index 28030d12fc9..1fbaab0d45a 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
@@ -111,7 +111,7 @@ where
 			info,
 			len,
 			self.implicit()?,
-			&(extension_version, call),
+			&TxBaseImplication((extension_version, call)),
 			source,
 		) {
 			// After validation, some origin must have been authorized.
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
index f8c5dc6a724..27f33acb69c 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
@@ -43,6 +43,72 @@ mod dispatch_transaction;
 pub use as_transaction_extension::AsTransactionExtension;
 pub use dispatch_transaction::DispatchTransaction;
 
+/// Provides `Sealed` trait.
+mod private {
+	/// Special trait that prevents the implementation of some traits outside of this crate.
+	pub trait Sealed {}
+}
+
+/// The base implication in a transaction.
+///
+/// This struct is used to represent the base implication in the transaction, that is
+/// the implication not part of any transaction extensions. It usually comprises the call and
+/// the transaction extension version.
+///
+/// The concept of implication in the transaction extension pipeline is explained in the trait
+/// documentation: [`TransactionExtension`].
+#[derive(Encode)]
+pub struct TxBaseImplication<T>(pub T);
+
+impl<T: Encode> Implication for TxBaseImplication<T> {
+	fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode> {
+		ImplicationParts { base: self, explicit: &(), implicit: &() }
+	}
+}
+
+impl<T> private::Sealed for TxBaseImplication<T> {}
+
+/// The implication in a transaction.
+///
+/// The concept of implication in the transaction extension pipeline is explained in the trait
+/// documentation: [`TransactionExtension`].
+#[derive(Encode)]
+pub struct ImplicationParts<Base, Explicit, Implicit> {
+	/// The base implication, that is implication not part of any transaction extension, usually
+	/// the call and the transaction extension version.
+	pub base: Base,
+	/// The explicit implication in transaction extensions.
+	pub explicit: Explicit,
+	/// The implicit implication in transaction extensions.
+	pub implicit: Implicit,
+}
+
+impl<Base: Encode, Explicit: Encode, Implicit: Encode> Implication
+	for ImplicationParts<Base, Explicit, Implicit>
+{
+	fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode> {
+		ImplicationParts { base: &self.base, explicit: &self.explicit, implicit: &self.implicit }
+	}
+}
+
+impl<Base, Explicit, Implicit> private::Sealed for ImplicationParts<Base, Explicit, Implicit> {}
+
+/// Interface of implications in the transaction extension pipeline.
+///
+/// Implications can be encoded, this is useful for checking signature on the implications.
+/// Implications can be split into parts; this allows destructuring and restructuring the
+/// implications, which is useful for nested pipelines.
+///
+/// This trait is sealed, consider using [`TxBaseImplication`] and [`ImplicationParts`]
+/// implementations.
+///
+/// The concept of implication in the transaction extension pipeline is explained in the trait
+/// documentation: [`TransactionExtension`].
+pub trait Implication: Encode + private::Sealed {
+	/// Destructure the implication into its parts.
+	fn parts(&self) -> ImplicationParts<&impl Encode, &impl Encode, &impl Encode>;
+}
+
 /// Shortcut for the result value of the `validate` function.
 pub type ValidateResult<Val, Call> =
 	Result<(ValidTransaction, Val, DispatchOriginOf<Call>), TransactionValidityError>;
@@ -244,7 +310,7 @@ pub trait TransactionExtension<Call: Dispatchable>:
 		info: &DispatchInfoOf<Call>,
 		len: usize,
 		self_implicit: Self::Implicit,
-		inherited_implication: &impl Encode,
+		inherited_implication: &impl Implication,
 		source: TransactionSource,
 	) -> ValidateResult<Self::Val, Call>;
 
@@ -499,7 +565,7 @@ impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
 		info: &DispatchInfoOf<Call>,
 		len: usize,
 		self_implicit: Self::Implicit,
-		inherited_implication: &impl Encode,
+		inherited_implication: &impl Implication,
 		source: TransactionSource,
 	) -> Result<
 		(ValidTransaction, Self::Val, <Call as Dispatchable>::RuntimeOrigin),
@@ -510,23 +576,20 @@ impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
 		let following_explicit_implications = for_tuples!( ( #( &self.Tuple ),* ) );
 		let following_implicit_implications = self_implicit;
 
+		let implication_parts = inherited_implication.parts();
+
 		for_tuples!(#(
 			// Implication of this pipeline element not relevant for later items, so we pop it.
 			let (_item, following_explicit_implications) = following_explicit_implications.pop_front();
 			let (item_implicit, following_implicit_implications) = following_implicit_implications.pop_front();
 			let (item_valid, item_val, origin) = {
-				let implications = (
-					// The first is the implications born of the fact we return the mutated
-					// origin.
-					inherited_implication,
-					// This is the explicitly made implication born of the fact the new origin is
-					// passed into the next items in this pipeline-tuple.
-					&following_explicit_implications,
-					// This is the implicitly made implication born of the fact the new origin is
-					// passed into the next items in this pipeline-tuple.
-					&following_implicit_implications,
-				);
-				Tuple.validate(origin, call, info, len, item_implicit, &implications, source)?
+				Tuple.validate(origin, call, info, len, item_implicit,
+					&ImplicationParts {
+						base: implication_parts.base,
+						explicit: (&following_explicit_implications, implication_parts.explicit),
+						implicit: (&following_implicit_implications, implication_parts.implicit),
+					},
+					source)?
 			};
 			let valid = valid.combine_with(item_valid);
 			let val = val.push_back(item_val);
@@ -620,7 +683,7 @@ impl<Call: Dispatchable> TransactionExtension<Call> for () {
 		_info: &DispatchInfoOf<Call>,
 		_len: usize,
 		_self_implicit: Self::Implicit,
-		_inherited_implication: &impl Encode,
+		_inherited_implication: &impl Implication,
 		_source: TransactionSource,
 	) -> Result<
 		(ValidTransaction, (), <Call as Dispatchable>::RuntimeOrigin),
@@ -639,3 +702,168 @@ impl<Call: Dispatchable> TransactionExtension<Call> for () {
 		Ok(())
 	}
 }
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[test]
+	fn test_implications_on_nested_structure() {
+		use scale_info::TypeInfo;
+		use std::cell::RefCell;
+
+		#[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)]
+		struct MockExtension {
+			also_implicit: u8,
+			explicit: u8,
+		}
+
+		const CALL_IMPLICIT: u8 = 23;
+
+		thread_local! {
+			static COUNTER: RefCell<u8> = RefCell::new(1);
+		}
+
+		impl TransactionExtension<()> for MockExtension {
+			const IDENTIFIER: &'static str = "MockExtension";
+			type Implicit = u8;
+			fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
+				Ok(self.also_implicit)
+			}
+			type Val = ();
+			type Pre = ();
+			fn weight(&self, _call: &()) -> Weight {
+				Weight::zero()
+			}
+			fn prepare(
+				self,
+				_val: Self::Val,
+				_origin: &DispatchOriginOf<()>,
+				_call: &(),
+				_info: &DispatchInfoOf<()>,
+				_len: usize,
+			) -> Result<Self::Pre, TransactionValidityError> {
+				Ok(())
+			}
+			fn validate(
+				&self,
+				origin: DispatchOriginOf<()>,
+				_call: &(),
+				_info: &DispatchInfoOf<()>,
+				_len: usize,
+				self_implicit: Self::Implicit,
+				inherited_implication: &impl Implication,
+				_source: TransactionSource,
+			) -> ValidateResult<Self::Val, ()> {
+				COUNTER.with(|c| {
+					let mut counter = c.borrow_mut();
+
+					assert_eq!(self_implicit, *counter);
+					assert_eq!(
+						self,
+						&MockExtension { also_implicit: *counter, explicit: *counter + 1 }
+					);
+
+					// Implications must be call then 1 to 22 then 1 to 22 odd.
+					let mut assert_implications = Vec::new();
+					assert_implications.push(CALL_IMPLICIT);
+					for i in *counter + 2..23 {
+						assert_implications.push(i);
+					}
+					for i in *counter + 2..23 {
+						if i % 2 == 1 {
+							assert_implications.push(i);
+						}
+					}
+					assert_eq!(inherited_implication.encode(), assert_implications);
+
+					*counter += 2;
+				});
+				Ok((ValidTransaction::default(), (), origin))
+			}
+			fn post_dispatch_details(
+				_pre: Self::Pre,
+				_info: &DispatchInfoOf<()>,
+				_post_info: &PostDispatchInfoOf<()>,
+				_len: usize,
+				_result: &DispatchResult,
+			) -> Result<Weight, TransactionValidityError> {
+				Ok(Weight::zero())
+			}
+		}
+
+		// Test for one nested structure
+
+		let ext = (
+			MockExtension { also_implicit: 1, explicit: 2 },
+			MockExtension { also_implicit: 3, explicit: 4 },
+			(
+				MockExtension { also_implicit: 5, explicit: 6 },
+				MockExtension { also_implicit: 7, explicit: 8 },
+				(
+					MockExtension { also_implicit: 9, explicit: 10 },
+					MockExtension { also_implicit: 11, explicit: 12 },
+				),
+				MockExtension { also_implicit: 13, explicit: 14 },
+				MockExtension { also_implicit: 15, explicit: 16 },
+			),
+			MockExtension { also_implicit: 17, explicit: 18 },
+			(MockExtension { also_implicit: 19, explicit: 20 },),
+			MockExtension { also_implicit: 21, explicit: 22 },
+		);
+
+		let implicit = ext.implicit().unwrap();
+
+		let res = ext
+			.validate(
+				(),
+				&(),
+				&DispatchInfoOf::<()>::default(),
+				0,
+				implicit,
+				&TxBaseImplication(CALL_IMPLICIT),
+				TransactionSource::Local,
+			)
+			.expect("valid");
+
+		assert_eq!(res.0, ValidTransaction::default());
+
+		// Test for another nested structure
+
+		COUNTER.with(|c| {
+			*c.borrow_mut() = 1;
+		});
+
+		let ext = (
+			MockExtension { also_implicit: 1, explicit: 2 },
+			MockExtension { also_implicit: 3, explicit: 4 },
+			MockExtension { also_implicit: 5, explicit: 6 },
+			MockExtension { also_implicit: 7, explicit: 8 },
+			MockExtension { also_implicit: 9, explicit: 10 },
+			MockExtension { also_implicit: 11, explicit: 12 },
+			(
+				MockExtension { also_implicit: 13, explicit: 14 },
+				MockExtension { also_implicit: 15, explicit: 16 },
+				MockExtension { also_implicit: 17, explicit: 18 },
+				MockExtension { also_implicit: 19, explicit: 20 },
+				MockExtension { also_implicit: 21, explicit: 22 },
+			),
+		);
+
+		let implicit = ext.implicit().unwrap();
+
+		let res = ext
+			.validate(
+				(),
+				&(),
+				&DispatchInfoOf::<()>::default(),
+				0,
+				implicit,
+				&TxBaseImplication(CALL_IMPLICIT),
+				TransactionSource::Local,
+			)
+			.expect("valid");
+
+		assert_eq!(res.0, ValidTransaction::default());
+	}
+}
-- 
GitLab


From 63c73bf6db1c8982ad3f2310a40799c5987f8900 Mon Sep 17 00:00:00 2001
From: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Date: Sun, 5 Jan 2025 12:25:52 +0900
Subject: [PATCH 018/116] Implement cumulus StorageWeightReclaim as wrapping
 transaction extension + frame system ReclaimWeight (#6140)

(rebasing of https://github.com/paritytech/polkadot-sdk/pull/5234)

## Issues:

* Transaction extensions have weights and refund weight. So the
reclaiming of unused weight must happen last in the transaction
extension pipeline. Currently it is inside `CheckWeight`.
* cumulus storage weight reclaim transaction extension misses the proof
size of logic happening prior to itself.

## Done:

* a new storage `ExtrinsicWeightReclaimed` in frame-system. Any logic
which attempts to do some reclaim must use this storage to avoid double
reclaim.
* a new function `reclaim_weight` in the frame-system pallet: it takes info and post
info as arguments, reads the already reclaimed weight, calculates the new
unused weight from info and post info, and does the more accurate reclaim if
higher.
* `CheckWeight` is unchanged and still reclaim the weight in post
dispatch
* `ReclaimWeight` is a new transaction extension in frame-system. For
solo chains it must be used last in the transaction extension pipeline.
It does the final, most accurate reclaim.
* `StorageWeightReclaim` is moved from cumulus primitives into its own
pallet (in order to define benchmark) and is changed into a wrapping
transaction extension.
It does the recording of proof size and does the reclaim using this
recording and the info and post info. So parachains don't need to use
`ReclaimWeight`. But also if they use it, there is no bug.

    ```rust
  /// The TransactionExtension to the basic transaction logic.
pub type TxExtension =
cumulus_pallet_weight_reclaim::StorageWeightReclaim<
         Runtime,
         (
                 frame_system::CheckNonZeroSender<Runtime>,
                 frame_system::CheckSpecVersion<Runtime>,
                 frame_system::CheckTxVersion<Runtime>,
                 frame_system::CheckGenesis<Runtime>,
                 frame_system::CheckEra<Runtime>,
                 frame_system::CheckNonce<Runtime>,
                 frame_system::CheckWeight<Runtime>,
pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
                 BridgeRejectObsoleteHeadersAndMessages,

(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,),
frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
         ),
  >;
  ```

---------

Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com>
Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Co-authored-by: Sebastian Kunert <skunert49@gmail.com>
Co-authored-by: command-bot <>
---
 .github/workflows/runtimes-matrix.json        |    2 +-
 Cargo.lock                                    |   50 +-
 Cargo.toml                                    |    2 +
 cumulus/pallets/weight-reclaim/Cargo.toml     |   63 +
 .../pallets/weight-reclaim/src/benchmarks.rs  |   71 ++
 cumulus/pallets/weight-reclaim/src/lib.rs     |  311 +++++
 cumulus/pallets/weight-reclaim/src/tests.rs   | 1050 +++++++++++++++++
 cumulus/pallets/weight-reclaim/src/weights.rs |   74 ++
 .../assets/asset-hub-rococo/Cargo.toml        |    6 +-
 .../assets/asset-hub-rococo/src/lib.rs        |   32 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |   87 +-
 .../asset-hub-rococo/src/weights/mod.rs       |    1 +
 .../assets/asset-hub-westend/Cargo.toml       |    6 +-
 .../assets/asset-hub-westend/src/lib.rs       |   34 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |   87 +-
 .../asset-hub-westend/src/weights/mod.rs      |    1 +
 .../bridge-hubs/bridge-hub-rococo/Cargo.toml  |    6 +-
 .../bridge-hubs/bridge-hub-rococo/src/lib.rs  |  111 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |  101 +-
 .../bridge-hub-rococo/src/weights/mod.rs      |    1 +
 .../bridge-hub-rococo/tests/snowbridge.rs     |    4 +-
 .../bridge-hub-rococo/tests/tests.rs          |    1 -
 .../bridge-hubs/bridge-hub-westend/Cargo.toml |    6 +-
 .../bridge-hubs/bridge-hub-westend/src/lib.rs |  107 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |  101 +-
 .../bridge-hub-westend/src/weights/mod.rs     |    1 +
 .../bridge-hub-westend/tests/snowbridge.rs    |    4 +-
 .../bridge-hub-westend/tests/tests.rs         |    1 -
 .../collectives-westend/Cargo.toml            |    6 +-
 .../collectives-westend/src/lib.rs            |   29 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |  101 +-
 .../collectives-westend/src/weights/mod.rs    |    1 +
 .../contracts/contracts-rococo/Cargo.toml     |    6 +-
 .../contracts/contracts-rococo/src/lib.rs     |   30 +-
 .../coretime/coretime-rococo/Cargo.toml       |    6 +-
 .../coretime/coretime-rococo/src/lib.rs       |   32 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |   14 +
 .../coretime-rococo/src/weights/mod.rs        |    1 +
 .../coretime/coretime-westend/Cargo.toml      |    6 +-
 .../coretime/coretime-westend/src/lib.rs      |   32 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |   14 +
 .../coretime-westend/src/weights/mod.rs       |    1 +
 .../glutton/glutton-westend/src/lib.rs        |    1 +
 .../src/weights/frame_system_extensions.rs    |   94 +-
 .../runtimes/people/people-rococo/Cargo.toml  |    6 +-
 .../runtimes/people/people-rococo/src/lib.rs  |   30 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |   14 +
 .../people/people-rococo/src/weights/mod.rs   |    1 +
 .../runtimes/people/people-westend/Cargo.toml |    6 +-
 .../runtimes/people/people-westend/src/lib.rs |   30 +-
 .../weights/cumulus_pallet_weight_reclaim.rs  |   67 ++
 .../src/weights/frame_system_extensions.rs    |   14 +
 .../people/people-westend/src/weights/mod.rs  |    1 +
 .../runtimes/testing/penpal/src/lib.rs        |    1 +
 .../testing/rococo-parachain/Cargo.toml       |    5 +-
 .../testing/rococo-parachain/src/lib.rs       |   30 +-
 .../storage-weight-reclaim/src/lib.rs         |   33 +-
 .../storage-weight-reclaim/src/tests.rs       |   15 +
 cumulus/test/client/Cargo.toml                |    3 +-
 cumulus/test/client/src/lib.rs                |    3 +-
 cumulus/test/runtime/Cargo.toml               |    4 +-
 cumulus/test/runtime/src/lib.rs               |   27 +-
 cumulus/test/service/Cargo.toml               |    3 +-
 cumulus/test/service/src/lib.rs               |    3 +-
 docs/sdk/Cargo.toml                           |    2 +-
 docs/sdk/src/guides/enable_pov_reclaim.rs     |    6 +-
 .../reference_docs/transaction_extensions.rs  |    8 +-
 polkadot/node/service/src/benchmarking.rs     |    4 +
 polkadot/node/test/service/src/lib.rs         |    2 +
 polkadot/runtime/rococo/src/lib.rs            |    2 +
 .../src/weights/frame_system_extensions.rs    |   93 +-
 polkadot/runtime/test-runtime/src/lib.rs      |    2 +
 polkadot/runtime/westend/src/lib.rs           |    2 +
 .../src/weights/frame_system_extensions.rs    |   92 +-
 .../xcm/xcm-builder/src/tests/pay/mock.rs     |    1 +
 polkadot/xcm/xcm-runtime-apis/tests/mock.rs   |    3 +-
 prdoc/pr_6140.prdoc                           |   95 ++
 substrate/bin/node/cli/src/service.rs         |    5 +
 substrate/bin/node/runtime/src/lib.rs         |    3 +
 substrate/bin/node/testing/src/keyring.rs     |    1 +
 substrate/frame/executive/src/tests.rs        |    5 +
 .../metadata-hash-extension/src/tests.rs      |    1 +
 substrate/frame/src/lib.rs                    |    1 +
 substrate/frame/support/src/dispatch.rs       |   13 +
 .../system/benchmarking/src/extensions.rs     |   46 +-
 .../frame/system/benchmarking/src/mock.rs     |    4 +
 .../system/src/extensions/check_weight.rs     |  142 ++-
 substrate/frame/system/src/extensions/mod.rs  |    1 +
 .../system/src/extensions/weight_reclaim.rs   |  401 +++++++
 .../frame/system/src/extensions/weights.rs    |   23 +
 substrate/frame/system/src/lib.rs             |   56 +-
 substrate/frame/system/src/tests.rs           |   64 +
 .../runtime/src/generic/checked_extrinsic.rs  |    1 -
 .../src/traits/transaction_extension/mod.rs   |    8 +-
 substrate/test-utils/runtime/src/extrinsic.rs |    1 +
 substrate/test-utils/runtime/src/lib.rs       |    1 +
 templates/minimal/runtime/src/lib.rs          |    4 +
 templates/parachain/runtime/Cargo.toml        |    2 +-
 templates/parachain/runtime/src/benchmarks.rs |    1 +
 .../parachain/runtime/src/configs/mod.rs      |    5 +
 templates/parachain/runtime/src/lib.rs        |   28 +-
 templates/solochain/node/src/benchmarking.rs  |    2 +
 templates/solochain/runtime/src/lib.rs        |    1 +
 umbrella/Cargo.toml                           |   10 +-
 umbrella/src/lib.rs                           |    4 +
 113 files changed, 4007 insertions(+), 666 deletions(-)
 create mode 100644 cumulus/pallets/weight-reclaim/Cargo.toml
 create mode 100644 cumulus/pallets/weight-reclaim/src/benchmarks.rs
 create mode 100644 cumulus/pallets/weight-reclaim/src/lib.rs
 create mode 100644 cumulus/pallets/weight-reclaim/src/tests.rs
 create mode 100644 cumulus/pallets/weight-reclaim/src/weights.rs
 create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_weight_reclaim.rs
 create mode 100644 prdoc/pr_6140.prdoc
 create mode 100644 substrate/frame/system/src/extensions/weight_reclaim.rs

diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json
index 104e7352133..ff16b739724 100644
--- a/.github/workflows/runtimes-matrix.json
+++ b/.github/workflows/runtimes-matrix.json
@@ -145,7 +145,7 @@
   {
     "name": "glutton-westend",
     "package": "glutton-westend-runtime",
-    "path": "cumulus/parachains/runtimes/gluttons/glutton-westend",
+    "path": "cumulus/parachains/runtimes/glutton/glutton-westend",
     "header": "cumulus/file_header.txt",
     "template": "cumulus/templates/xcm-bench-template.hbs",
     "bench_features": "runtime-benchmarks",
diff --git a/Cargo.lock b/Cargo.lock
index 3c55a14256c..b0fb0586be3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -959,11 +959,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -1095,11 +1095,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -2666,11 +2666,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -2905,11 +2905,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -3645,11 +3645,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -3952,11 +3952,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -4095,11 +4095,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -4196,11 +4196,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -5074,6 +5074,25 @@ dependencies = [
  "sp-runtime 39.0.2",
 ]
 
+[[package]]
+name = "cumulus-pallet-weight-reclaim"
+version = "1.0.0"
+dependencies = [
+ "cumulus-primitives-proof-size-hostfunction 0.2.0",
+ "cumulus-primitives-storage-weight-reclaim 1.0.0",
+ "derivative",
+ "docify",
+ "frame-benchmarking 28.0.0",
+ "frame-support 28.0.0",
+ "frame-system 28.0.0",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-io 30.0.0",
+ "sp-runtime 31.0.1",
+ "sp-trie 29.0.0",
+]
+
 [[package]]
 name = "cumulus-pallet-xcm"
 version = "0.7.0"
@@ -5524,10 +5543,10 @@ dependencies = [
 name = "cumulus-test-client"
 version = "0.1.0"
 dependencies = [
+ "cumulus-pallet-weight-reclaim",
  "cumulus-primitives-core 0.7.0",
  "cumulus-primitives-parachain-inherent 0.7.0",
  "cumulus-primitives-proof-size-hostfunction 0.2.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-test-relay-sproof-builder 0.7.0",
  "cumulus-test-runtime",
  "cumulus-test-service",
@@ -5589,9 +5608,9 @@ version = "0.1.0"
 dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "frame-executive 28.0.0",
  "frame-support 28.0.0",
  "frame-system 28.0.0",
@@ -5643,8 +5662,8 @@ dependencies = [
  "cumulus-client-pov-recovery",
  "cumulus-client-service",
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-relay-chain-inprocess-interface",
  "cumulus-relay-chain-interface",
  "cumulus-relay-chain-minimal-node",
@@ -16742,11 +16761,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "enumflags2",
  "frame-benchmarking 28.0.0",
@@ -16845,11 +16864,11 @@ dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "enumflags2",
  "frame-benchmarking 28.0.0",
@@ -18645,6 +18664,7 @@ dependencies = [
  "cumulus-pallet-parachain-system-proc-macro 0.6.0",
  "cumulus-pallet-session-benchmarking 9.0.0",
  "cumulus-pallet-solo-to-para 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-ping 0.7.0",
@@ -19233,8 +19253,8 @@ dependencies = [
  "cumulus-client-service",
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-primitives-proof-size-hostfunction 0.2.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "docify",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
@@ -21447,12 +21467,12 @@ version = "0.6.0"
 dependencies = [
  "cumulus-pallet-aura-ext 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-pallet-weight-reclaim",
  "cumulus-pallet-xcm 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-ping 0.7.0",
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
- "cumulus-primitives-storage-weight-reclaim 1.0.0",
  "cumulus-primitives-utility 0.7.0",
  "frame-benchmarking 28.0.0",
  "frame-executive 28.0.0",
diff --git a/Cargo.toml b/Cargo.toml
index 64a11a340d1..c917a8a8fea 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -83,6 +83,7 @@ members = [
 	"cumulus/pallets/parachain-system/proc-macro",
 	"cumulus/pallets/session-benchmarking",
 	"cumulus/pallets/solo-to-para",
+	"cumulus/pallets/weight-reclaim",
 	"cumulus/pallets/xcm",
 	"cumulus/pallets/xcmp-queue",
 	"cumulus/parachains/common",
@@ -717,6 +718,7 @@ cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", d
 cumulus-pallet-parachain-system-proc-macro = { path = "cumulus/pallets/parachain-system/proc-macro", default-features = false }
 cumulus-pallet-session-benchmarking = { path = "cumulus/pallets/session-benchmarking", default-features = false }
 cumulus-pallet-solo-to-para = { path = "cumulus/pallets/solo-to-para", default-features = false }
+cumulus-pallet-weight-reclaim = { path = "cumulus/pallets/weight-reclaim", default-features = false }
 cumulus-pallet-xcm = { path = "cumulus/pallets/xcm", default-features = false }
 cumulus-pallet-xcmp-queue = { path = "cumulus/pallets/xcmp-queue", default-features = false }
 cumulus-ping = { path = "cumulus/parachains/pallets/ping", default-features = false }
diff --git a/cumulus/pallets/weight-reclaim/Cargo.toml b/cumulus/pallets/weight-reclaim/Cargo.toml
new file mode 100644
index 00000000000..8bde6abaff6
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/Cargo.toml
@@ -0,0 +1,63 @@
+[package]
+name = "cumulus-pallet-weight-reclaim"
+version = "1.0.0"
+authors.workspace = true
+edition.workspace = true
+license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
+description = "pallet and transaction extensions for accurate proof size reclaim"
+
+[lints]
+workspace = true
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+# Substrate dependencies
+sp-io = { workspace = true }
+sp-runtime = { workspace = true }
+sp-trie = { workspace = true }
+
+cumulus-primitives-storage-weight-reclaim = { workspace = true }
+frame-benchmarking = { optional = true, workspace = true }
+frame-support = { workspace = true }
+frame-system = { workspace = true }
+
+# Other dependencies
+codec = { features = ["derive"], workspace = true }
+derivative = { features = ["use_core"], workspace = true }
+docify = { workspace = true }
+log = { workspace = true, default-features = true }
+scale-info = { features = ["derive"], workspace = true }
+
+[dev-dependencies]
+cumulus-primitives-proof-size-hostfunction = { workspace = true }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"cumulus-primitives-proof-size-hostfunction/std",
+	"cumulus-primitives-storage-weight-reclaim/std",
+	"frame-benchmarking?/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"scale-info/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-trie/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/cumulus/pallets/weight-reclaim/src/benchmarks.rs b/cumulus/pallets/weight-reclaim/src/benchmarks.rs
new file mode 100644
index 00000000000..78bebc967d9
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/src/benchmarks.rs
@@ -0,0 +1,71 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+#![cfg(feature = "runtime-benchmarks")]
+
+use super::*;
+use frame_support::pallet_prelude::{DispatchClass, Pays};
+use frame_system::RawOrigin;
+use sp_runtime::traits::{AsTransactionAuthorizedOrigin, DispatchTransaction};
+
+#[frame_benchmarking::v2::benchmarks(
+	where T: Send + Sync,
+		<T as frame_system::Config>::RuntimeCall:
+			Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+		<T as frame_system::Config>::RuntimeOrigin: AsTransactionAuthorizedOrigin,
+)]
+mod bench {
+	use super::*;
+	use frame_benchmarking::impl_test_function;
+
+	#[benchmark]
+	fn storage_weight_reclaim() {
+		let ext = StorageWeightReclaim::<T, ()>::new(());
+
+		let origin = RawOrigin::Root.into();
+		let call = T::RuntimeCall::from(frame_system::Call::remark { remark: alloc::vec![] });
+
+		let overestimate = 10_000;
+		let info = DispatchInfo {
+			call_weight: Weight::zero().add_proof_size(overestimate),
+			extension_weight: Weight::zero(),
+			class: DispatchClass::Normal,
+			pays_fee: Pays::No,
+		};
+
+		let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Pays::No };
+
+		let mut block_weight = frame_system::ConsumedWeight::default();
+		block_weight.accrue(Weight::from_parts(0, overestimate), info.class);
+
+		frame_system::BlockWeight::<T>::put(block_weight);
+
+		#[block]
+		{
+			assert!(ext.test_run(origin, &call, &info, 0, 0, |_| Ok(post_info)).unwrap().is_ok());
+		}
+
+		let final_block_proof_size =
+			frame_system::BlockWeight::<T>::get().get(info.class).proof_size();
+
+		assert!(
+			final_block_proof_size < overestimate,
+			"The proof size measured should be less than {overestimate}"
+		);
+	}
+
+	impl_benchmark_test_suite!(Pallet, crate::tests::setup_test_ext_default(), crate::tests::Test);
+}
diff --git a/cumulus/pallets/weight-reclaim/src/lib.rs b/cumulus/pallets/weight-reclaim/src/lib.rs
new file mode 100644
index 00000000000..bd9929033af
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/src/lib.rs
@@ -0,0 +1,311 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Pallet and transaction extensions to reclaim PoV proof size weight after an extrinsic has been
+//! applied.
+//!
+//! This crate provides:
+//! * [`StorageWeightReclaim`] transaction extension: it must wrap the whole transaction extension
+//!   pipeline.
+//! * The pallet required for the transaction extension's weight information and benchmarks.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+#[cfg(not(feature = "std"))]
+use alloc::vec::Vec;
+use codec::{Decode, Encode};
+use cumulus_primitives_storage_weight_reclaim::get_proof_size;
+use derivative::Derivative;
+use frame_support::{
+	dispatch::{DispatchInfo, PostDispatchInfo},
+	pallet_prelude::Weight,
+	traits::Defensive,
+};
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{DispatchInfoOf, Dispatchable, Implication, PostDispatchInfoOf, TransactionExtension},
+	transaction_validity::{TransactionSource, TransactionValidityError, ValidTransaction},
+	DispatchResult,
+};
+
+#[cfg(feature = "runtime-benchmarks")]
+pub mod benchmarks;
+#[cfg(test)]
+mod tests;
+mod weights;
+
+pub use pallet::*;
+pub use weights::WeightInfo;
+
+const LOG_TARGET: &'static str = "runtime::storage_reclaim_pallet";
+
+/// Pallet to use alongside the transaction extension [`StorageWeightReclaim`], the pallet provides
+/// weight information and benchmarks.
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		type WeightInfo: WeightInfo;
+	}
+}
+
+/// Storage weight reclaim mechanism.
+///
+/// This extension must wrap all the transaction extensions:
+#[doc = docify::embed!("./src/tests.rs", Tx)]
+///
+/// This extension checks the size of the node-side storage proof before and after executing a given
+/// extrinsic using the proof size host function. The difference between benchmarked and used weight
+/// is reclaimed.
+///
+/// If the benchmark was underestimating the proof size, then it is added to the block weight.
+///
+/// For the time part of the weight, it does the same as the system `WeightReclaim` extension:
+/// it calculates the unused weight using the post-dispatch information and reclaims it.
+/// So this extension can be used as a drop-in replacement for the `WeightReclaim` extension
+/// on parachains.
+#[derive(Encode, Decode, TypeInfo, Derivative)]
+#[derivative(
+	Clone(bound = "S: Clone"),
+	Eq(bound = "S: Eq"),
+	PartialEq(bound = "S: PartialEq"),
+	Default(bound = "S: Default")
+)]
+#[scale_info(skip_type_params(T))]
+pub struct StorageWeightReclaim<T, S>(pub S, core::marker::PhantomData<T>);
+
+impl<T, S> StorageWeightReclaim<T, S> {
+	/// Create a new `StorageWeightReclaim` instance.
+	pub fn new(s: S) -> Self {
+		Self(s, Default::default())
+	}
+}
+
+impl<T, S> From<S> for StorageWeightReclaim<T, S> {
+	fn from(s: S) -> Self {
+		Self::new(s)
+	}
+}
+
+impl<T, S: core::fmt::Debug> core::fmt::Debug for StorageWeightReclaim<T, S> {
+	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+		#[cfg(feature = "std")]
+		let _ = write!(f, "StorageWeightReclaim<{:?}>", self.0);
+
+		#[cfg(not(feature = "std"))]
+		let _ = write!(f, "StorageWeightReclaim<wasm-stripped>");
+
+		Ok(())
+	}
+}
+
+impl<T: Config + Send + Sync, S: TransactionExtension<T::RuntimeCall>>
+	TransactionExtension<T::RuntimeCall> for StorageWeightReclaim<T, S>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	const IDENTIFIER: &'static str = "StorageWeightReclaim<Use `metadata()`!>";
+
+	type Implicit = S::Implicit;
+
+	// Initial proof size and inner extension value.
+	type Val = (Option<u64>, S::Val);
+
+	// Initial proof size and inner extension pre.
+	type Pre = (Option<u64>, S::Pre);
+
+	fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
+		self.0.implicit()
+	}
+
+	fn metadata() -> Vec<sp_runtime::traits::TransactionExtensionMetadata> {
+		let mut inner = S::metadata();
+		inner.push(sp_runtime::traits::TransactionExtensionMetadata {
+			identifier: "StorageWeightReclaim",
+			ty: scale_info::meta_type::<()>(),
+			implicit: scale_info::meta_type::<()>(),
+		});
+		inner
+	}
+
+	fn weight(&self, call: &T::RuntimeCall) -> Weight {
+		T::WeightInfo::storage_weight_reclaim().saturating_add(self.0.weight(call))
+	}
+
+	fn validate(
+		&self,
+		origin: T::RuntimeOrigin,
+		call: &T::RuntimeCall,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+		self_implicit: Self::Implicit,
+		inherited_implication: &impl Implication,
+		source: TransactionSource,
+	) -> Result<(ValidTransaction, Self::Val, T::RuntimeOrigin), TransactionValidityError> {
+		let proof_size = get_proof_size();
+
+		self.0
+			.validate(origin, call, info, len, self_implicit, inherited_implication, source)
+			.map(|(validity, val, origin)| (validity, (proof_size, val), origin))
+	}
+
+	fn prepare(
+		self,
+		val: Self::Val,
+		origin: &T::RuntimeOrigin,
+		call: &T::RuntimeCall,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		let (proof_size, inner_val) = val;
+		self.0.prepare(inner_val, origin, call, info, len).map(|pre| (proof_size, pre))
+	}
+
+	fn post_dispatch_details(
+		pre: Self::Pre,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &PostDispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<Weight, TransactionValidityError> {
+		let (proof_size_before_dispatch, inner_pre) = pre;
+
+		let mut post_info_with_inner = *post_info;
+		S::post_dispatch(inner_pre, info, &mut post_info_with_inner, len, result)?;
+
+		let inner_refund = if let (Some(before_weight), Some(after_weight)) =
+			(post_info.actual_weight, post_info_with_inner.actual_weight)
+		{
+			before_weight.saturating_sub(after_weight)
+		} else {
+			Weight::zero()
+		};
+
+		let Some(proof_size_before_dispatch) = proof_size_before_dispatch else {
+			// We have no proof size information, so there is nothing we can do.
+			return Ok(inner_refund);
+		};
+
+		let Some(proof_size_after_dispatch) = get_proof_size().defensive_proof(
+			"Proof recording enabled during prepare, now disabled. This should not happen.",
+		) else {
+			return Ok(inner_refund)
+		};
+
+		// The consumed proof size as measured by the host.
+		let measured_proof_size =
+			proof_size_after_dispatch.saturating_sub(proof_size_before_dispatch);
+
+		// The consumed weight as benchmarked. Calculated from post info and info.
+		// NOTE: `calc_actual_weight` will take the minimum of `post_info` and `info` weights.
+		// This means any underestimation of compute time in the pre dispatch info will not be
+		// taken into account.
+		let benchmarked_actual_weight = post_info_with_inner.calc_actual_weight(info);
+
+		let benchmarked_actual_proof_size = benchmarked_actual_weight.proof_size();
+		if benchmarked_actual_proof_size < measured_proof_size {
+			log::error!(
+				target: LOG_TARGET,
+				"Benchmarked storage weight smaller than consumed storage weight. \
+				benchmarked: {benchmarked_actual_proof_size} consumed: {measured_proof_size}"
+			);
+		} else {
+			log::trace!(
+				target: LOG_TARGET,
+				"Reclaiming storage weight. benchmarked: {benchmarked_actual_proof_size},
+				consumed: {measured_proof_size}"
+			);
+		}
+
+		let accurate_weight = benchmarked_actual_weight.set_proof_size(measured_proof_size);
+
+		let pov_size_missing_from_node = frame_system::BlockWeight::<T>::mutate(|current_weight| {
+			let already_reclaimed = frame_system::ExtrinsicWeightReclaimed::<T>::get();
+			current_weight.accrue(already_reclaimed, info.class);
+			current_weight.reduce(info.total_weight(), info.class);
+			current_weight.accrue(accurate_weight, info.class);
+
+			// If we encounter a situation where the node-side proof size is already higher than
+			// what we have in the runtime bookkeeping, we add the difference to the `BlockWeight`.
+			// This prevents the node-side proof size from growing faster than the runtime proof size.
+			let extrinsic_len = frame_system::AllExtrinsicsLen::<T>::get().unwrap_or(0);
+			let node_side_pov_size = proof_size_after_dispatch.saturating_add(extrinsic_len.into());
+			let block_weight_proof_size = current_weight.total().proof_size();
+			let pov_size_missing_from_node =
+				node_side_pov_size.saturating_sub(block_weight_proof_size);
+			if pov_size_missing_from_node > 0 {
+				log::warn!(
+					target: LOG_TARGET,
+					"Node-side PoV size higher than runtime proof size weight. node-side: \
+					{node_side_pov_size} extrinsic_len: {extrinsic_len} runtime: \
+					{block_weight_proof_size}, missing: {pov_size_missing_from_node}. Setting to \
+					node-side proof size."
+				);
+				current_weight
+					.accrue(Weight::from_parts(0, pov_size_missing_from_node), info.class);
+			}
+
+			pov_size_missing_from_node
+		});
+
+		// The saturation will happen if the pre-dispatch weight is underestimating the proof
+		// size or if the node-side proof size is higher than expected.
+		// In this case the extrinsic proof size weight reclaimed is 0 and not a negative reclaim.
+		let accurate_unspent = info
+			.total_weight()
+			.saturating_sub(accurate_weight)
+			.saturating_sub(Weight::from_parts(0, pov_size_missing_from_node));
+		frame_system::ExtrinsicWeightReclaimed::<T>::put(accurate_unspent);
+
+		// The call has already returned its unspent amount.
+		// (As would any transaction extension earlier in the pipeline, but there shouldn't be any.)
+		let already_unspent_in_tx_ext_pipeline = post_info.calc_unspent(info);
+		Ok(accurate_unspent.saturating_sub(already_unspent_in_tx_ext_pipeline))
+	}
+
+	fn bare_validate(
+		call: &T::RuntimeCall,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+	) -> frame_support::pallet_prelude::TransactionValidity {
+		S::bare_validate(call, info, len)
+	}
+
+	fn bare_validate_and_prepare(
+		call: &T::RuntimeCall,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+	) -> Result<(), TransactionValidityError> {
+		S::bare_validate_and_prepare(call, info, len)
+	}
+
+	fn bare_post_dispatch(
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &mut PostDispatchInfoOf<T::RuntimeCall>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		S::bare_post_dispatch(info, post_info, len, result)?;
+
+		frame_system::Pallet::<T>::reclaim_weight(info, post_info)
+	}
+}
diff --git a/cumulus/pallets/weight-reclaim/src/tests.rs b/cumulus/pallets/weight-reclaim/src/tests.rs
new file mode 100644
index 00000000000..b87c107c7ec
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/src/tests.rs
@@ -0,0 +1,1050 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+#![cfg(test)]
+
+use super::*;
+use cumulus_primitives_proof_size_hostfunction::PROOF_RECORDING_DISABLED;
+use frame_support::{
+	assert_ok, derive_impl, dispatch::GetDispatchInfo, pallet_prelude::DispatchClass,
+};
+use sp_runtime::{
+	generic,
+	traits::{Applyable, BlakeTwo256, DispatchTransaction, Get},
+	BuildStorage,
+};
+use sp_trie::proof_size_extension::ProofSizeExt;
+
+thread_local! {
+	static CHECK_WEIGHT_WEIGHT: core::cell::RefCell<Weight> = Default::default();
+	static STORAGE_WEIGHT_RECLAIM_WEIGHT: core::cell::RefCell<Weight> = Default::default();
+	static MOCK_EXT_WEIGHT: core::cell::RefCell<Weight> = Default::default();
+	static MOCK_EXT_REFUND: core::cell::RefCell<Weight> = Default::default();
+}
+
+/// An extension which has some proof_size weight and some proof_size refund.
+#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq, scale_info::TypeInfo)]
+pub struct MockExtensionWithRefund;
+
+impl TransactionExtension<RuntimeCall> for MockExtensionWithRefund {
+	const IDENTIFIER: &'static str = "mock_extension_with_refund";
+	type Implicit = ();
+	type Val = ();
+	type Pre = ();
+	fn weight(&self, _: &RuntimeCall) -> Weight {
+		MOCK_EXT_WEIGHT.with_borrow(|v| *v)
+	}
+	fn post_dispatch_details(
+		_pre: Self::Pre,
+		_info: &DispatchInfoOf<RuntimeCall>,
+		_post_info: &PostDispatchInfoOf<RuntimeCall>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<Weight, TransactionValidityError> {
+		Ok(MOCK_EXT_REFUND.with_borrow(|v| *v))
+	}
+	fn bare_post_dispatch(
+		_info: &DispatchInfoOf<RuntimeCall>,
+		post_info: &mut PostDispatchInfoOf<RuntimeCall>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		if let Some(ref mut w) = post_info.actual_weight {
+			*w -= MOCK_EXT_REFUND.with_borrow(|v| *v);
+		}
+		Ok(())
+	}
+
+	sp_runtime::impl_tx_ext_default!(RuntimeCall; validate prepare);
+}
+
+pub type Tx =
+	crate::StorageWeightReclaim<Test, (frame_system::CheckWeight<Test>, MockExtensionWithRefund)>;
+type AccountId = u64;
+type Extrinsic = generic::UncheckedExtrinsic<AccountId, RuntimeCall, (), Tx>;
+type Block = generic::Block<generic::Header<AccountId, BlakeTwo256>, Extrinsic>;
+
+#[frame_support::runtime]
+mod runtime {
+	#[runtime::runtime]
+	#[runtime::derive(
+		RuntimeCall,
+		RuntimeEvent,
+		RuntimeError,
+		RuntimeOrigin,
+		RuntimeFreezeReason,
+		RuntimeHoldReason,
+		RuntimeSlashReason,
+		RuntimeLockId,
+		RuntimeTask
+	)]
+	pub struct Test;
+
+	#[runtime::pallet_index(0)]
+	pub type System = frame_system::Pallet<Test>;
+
+	#[runtime::pallet_index(1)]
+	pub type WeightReclaim = crate::Pallet<Test>;
+}
+
+pub struct MockWeightInfo;
+
+impl frame_system::ExtensionsWeightInfo for MockWeightInfo {
+	fn check_genesis() -> Weight {
+		Default::default()
+	}
+	fn check_mortality_mortal_transaction() -> Weight {
+		Default::default()
+	}
+	fn check_mortality_immortal_transaction() -> Weight {
+		Default::default()
+	}
+	fn check_non_zero_sender() -> Weight {
+		Default::default()
+	}
+	fn check_nonce() -> Weight {
+		Default::default()
+	}
+	fn check_spec_version() -> Weight {
+		Default::default()
+	}
+	fn check_tx_version() -> Weight {
+		Default::default()
+	}
+	fn check_weight() -> Weight {
+		CHECK_WEIGHT_WEIGHT.with_borrow(|v| *v)
+	}
+	fn weight_reclaim() -> Weight {
+		Default::default()
+	}
+}
+
+impl frame_system::WeightInfo for MockWeightInfo {
+	fn remark(_b: u32) -> Weight {
+		Weight::from_parts(400, 0)
+	}
+	fn set_code() -> Weight {
+		Weight::zero()
+	}
+	fn set_storage(_i: u32) -> Weight {
+		Weight::zero()
+	}
+	fn kill_prefix(_p: u32) -> Weight {
+		Weight::zero()
+	}
+	fn kill_storage(_i: u32) -> Weight {
+		Weight::zero()
+	}
+	fn set_heap_pages() -> Weight {
+		Weight::zero()
+	}
+	fn remark_with_event(_b: u32) -> Weight {
+		Weight::zero()
+	}
+	fn authorize_upgrade() -> Weight {
+		Weight::zero()
+	}
+	fn apply_authorized_upgrade() -> Weight {
+		Weight::zero()
+	}
+}
+
+impl crate::WeightInfo for MockWeightInfo {
+	fn storage_weight_reclaim() -> Weight {
+		STORAGE_WEIGHT_RECLAIM_WEIGHT.with_borrow(|v| *v)
+	}
+}
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for Test {
+	type Block = Block;
+	type AccountData = ();
+	type MaxConsumers = frame_support::traits::ConstU32<3>;
+	type ExtensionsWeightInfo = MockWeightInfo;
+}
+
+impl crate::Config for Test {
+	type WeightInfo = MockWeightInfo;
+}
+
+fn new_test_ext() -> sp_io::TestExternalities {
+	RuntimeGenesisConfig::default().build_storage().unwrap().into()
+}
+
+struct TestRecorder {
+	return_values: Box<[usize]>,
+	counter: core::sync::atomic::AtomicUsize,
+}
+
+impl TestRecorder {
+	fn new(values: &[usize]) -> Self {
+		TestRecorder { return_values: values.into(), counter: Default::default() }
+	}
+}
+
+impl sp_trie::ProofSizeProvider for TestRecorder {
+	fn estimate_encoded_size(&self) -> usize {
+		let counter = self.counter.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
+		self.return_values[counter]
+	}
+}
+
+fn setup_test_externalities(proof_values: &[usize]) -> sp_io::TestExternalities {
+	let mut test_ext = new_test_ext();
+	let test_recorder = TestRecorder::new(proof_values);
+	test_ext.register_extension(ProofSizeExt::new(test_recorder));
+	test_ext
+}
+
+#[cfg(feature = "runtime-benchmarks")]
+pub fn setup_test_ext_default() -> sp_io::TestExternalities {
+	setup_test_externalities(&[0; 32])
+}
+
+fn set_current_storage_weight(new_weight: u64) {
+	frame_system::BlockWeight::<Test>::mutate(|current_weight| {
+		current_weight.set(Weight::from_parts(0, new_weight), DispatchClass::Normal);
+	});
+}
+
+fn get_storage_weight() -> Weight {
+	*frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal)
+}
+
+const CALL: &<Test as frame_system::Config>::RuntimeCall =
+	&RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 });
+const ALICE_ORIGIN: frame_system::Origin<Test> = frame_system::Origin::<Test>::Signed(99);
+const LEN: usize = 150;
+
+fn new_tx_ext() -> Tx {
+	Tx::new((frame_system::CheckWeight::new(), MockExtensionWithRefund))
+}
+
+fn new_extrinsic() -> generic::CheckedExtrinsic<AccountId, RuntimeCall, Tx> {
+	generic::CheckedExtrinsic {
+		format: generic::ExtrinsicFormat::Signed(99, new_tx_ext()),
+		function: RuntimeCall::System(frame_system::Call::remark { remark: vec![] }),
+	}
+}
+
+#[allow(unused)]
+mod doc {
+	type Runtime = super::Test;
+	use crate::StorageWeightReclaim;
+
+	#[docify::export(Tx)]
+	type Tx = StorageWeightReclaim<
+		Runtime,
+		(
+			frame_system::CheckNonce<Runtime>,
+			frame_system::CheckWeight<Runtime>,
+			// ... all other extensions
+			// No need for `frame_system::WeightReclaim` as the reclaim.
+		),
+	>;
+}
+
+#[test]
+fn basic_refund_no_post_info() {
+	// The real cost will be 100 bytes of storage size
+	let mut test_ext = setup_test_externalities(&[0, 100]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+
+		// Benchmarked storage weight: 500
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+
+		let tx_ext = new_tx_ext();
+
+		// Check weight should add 500 + 150 (len) to weight.
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(0));
+
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight, None);
+		assert_eq!(get_storage_weight().proof_size(), 1250);
+	});
+}
+
+#[test]
+fn basic_refund_some_post_info() {
+	// The real cost will be 100 bytes of storage size
+	let mut test_ext = setup_test_externalities(&[0, 100]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+
+		// Benchmarked storage weight: 500
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		// Check weight should add 500 + 150 (len) to weight.
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(0));
+
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(0, 100));
+		assert_eq!(get_storage_weight().proof_size(), 1250);
+	});
+}
+
+#[test]
+fn does_nothing_without_extension() {
+	// Proof size extension not registered
+	let mut test_ext = new_test_ext();
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+
+		// Benchmarked storage weight: 500
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		// Check weight should add 500 + 150 (len) to weight.
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, None);
+
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), info.total_weight());
+		assert_eq!(get_storage_weight().proof_size(), 1650);
+	})
+}
+
+#[test]
+fn negative_refund_is_added_to_weight() {
+	let mut test_ext = setup_test_externalities(&[100, 300]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+		// Benchmarked storage weight: 100
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 100), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		// Weight added should be 100 + 150 (len)
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(100));
+
+		// We expect no refund
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), info.total_weight());
+		assert_eq!(
+			get_storage_weight().proof_size(),
+			1100 + LEN as u64 + info.total_weight().proof_size()
+		);
+	})
+}
+
+#[test]
+fn test_zero_proof_size() {
+	let mut test_ext = setup_test_externalities(&[0, 0]);
+
+	test_ext.execute_with(|| {
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(0));
+
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(0, 0));
+		// Proof size should be exactly equal to extrinsic length
+		assert_eq!(get_storage_weight().proof_size(), LEN as u64);
+	});
+}
+
+#[test]
+fn test_larger_pre_dispatch_proof_size() {
+	let mut test_ext = setup_test_externalities(&[300, 100]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1300);
+
+		let info = DispatchInfo { call_weight: Weight::from_parts(0, 500), ..Default::default() };
+		let mut post_info = PostDispatchInfo::default();
+		post_info.actual_weight = Some(info.total_weight());
+
+		let tx_ext = new_tx_ext();
+
+		// Adds 500 + 150 (len) weight, total weight is 1950
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(300));
+
+		// check weight:
+		// Refund 500 unspent weight according to `post_info`, total weight is now 1650
+		//
+		// storage reclaim:
+		// Recorded proof size is negative -200, total weight is now 1450
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(0, 0));
+		assert_eq!(get_storage_weight().proof_size(), 1450);
+	});
+}
+
+#[test]
+fn test_incorporates_check_weight_unspent_weight() {
+	let mut test_ext = setup_test_externalities(&[100, 300]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+
+		// Benchmarked storage weight: 300
+		let info = DispatchInfo { call_weight: Weight::from_parts(100, 300), ..Default::default() };
+
+		// Actual weight is 50
+		let mut post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(50, 250)),
+			pays_fee: Default::default(),
+		};
+
+		let tx_ext = new_tx_ext();
+
+		// Check weight should add 300 + 150 (len) of weight
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		assert_eq!(pre.0, Some(100));
+
+		// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`
+		// we always need to call `post_dispatch` to verify that they interoperate correctly.
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(50, 350 - LEN as u64));
+		// Reclaimed 100
+		assert_eq!(get_storage_weight().proof_size(), 1350);
+	})
+}
+
+#[test]
+fn test_incorporates_check_weight_unspent_weight_on_negative() {
+	let mut test_ext = setup_test_externalities(&[100, 300]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(1000);
+		// Benchmarked storage weight: 50
+		let info = DispatchInfo { call_weight: Weight::from_parts(100, 50), ..Default::default() };
+
+		// Actual weight is 25
+		let mut post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(50, 25)),
+			pays_fee: Default::default(),
+		};
+
+		let tx_ext = new_tx_ext();
+
+		// Adds 50 + 150 (len) weight, total weight 1200
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+		assert_eq!(pre.0, Some(100));
+
+		// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`
+		// CheckWeight: refunds unspent 25 weight according to `post_info`, 1175
+		//
+		// storage reclaim:
+		// Adds 200 - 25 (unspent) == 175 weight, total weight 1350
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(50, 25));
+		assert_eq!(get_storage_weight().proof_size(), 1350);
+	})
+}
+
+#[test]
+fn test_nothing_reclaimed() {
+	let mut test_ext = setup_test_externalities(&[0, 100]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(0);
+		// Benchmarked storage weight: 100
+		let info = DispatchInfo { call_weight: Weight::from_parts(100, 100), ..Default::default() };
+
+		// Actual proof size is 100
+		let mut post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(50, 100)),
+			pays_fee: Default::default(),
+		};
+
+		let tx_ext = new_tx_ext();
+
+		// Adds benchmarked weight 100 + 150 (len), total weight is now 250
+		let (pre, _) = tx_ext
+			.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+			.unwrap();
+
+		// Weight should go up by 150 len + 100 proof size weight, total weight 250
+		assert_eq!(get_storage_weight().proof_size(), 250);
+
+		// Should return `setup_test_externalities` proof recorder value: 0.
+		assert_eq!(pre.0, Some(0));
+
+		// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`
+		// we always need to call `post_dispatch` to verify that they interoperate correctly.
+		// Nothing to refund, unspent is 0, total weight 250
+		//
+		// weight reclaim:
+		// `setup_test_externalities` proof recorder value: 100, so this means the extrinsic
+		// actually used 100 proof size.
+		// Nothing to refund or add, weight matches proof recorder
+		assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+		assert_eq!(post_info.actual_weight.unwrap(), Weight::from_parts(50, 100));
+		// Check block len weight was not reclaimed:
+		// 100 weight + 150 extrinsic len == 250 proof size
+		assert_eq!(get_storage_weight().proof_size(), 250);
+	})
+}
+
+// Test for refund of calls and related proof size
+#[test]
+fn test_series() {
+	struct TestCfg {
+		measured_proof_size_pre_dispatch: u64,
+		measured_proof_size_post_dispatch: u64,
+		info_call_weight: Weight,
+		info_extension_weight: Weight,
+		post_info_actual_weight: Option<Weight>,
+		block_weight_pre_dispatch: Weight,
+		mock_ext_refund: Weight,
+		assert_post_info_weight: Option<Weight>,
+		assert_block_weight_post_dispatch: Weight,
+	}
+
+	let base_extrinsic = <<Test as frame_system::Config>::BlockWeights as Get<
+		frame_system::limits::BlockWeights,
+	>>::get()
+	.per_class
+	.get(DispatchClass::Normal)
+	.base_extrinsic;
+
+	let tests = vec![
+		// Info is exact, no post info, no refund.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 400,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: None,
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(0, 0),
+			assert_post_info_weight: None,
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1100, 1300 + LEN as u64),
+		},
+		// some tx ext refund is ignored, because post info is None.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 400,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: None,
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 20),
+			assert_post_info_weight: None,
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1100, 1300 + LEN as u64),
+		},
+		// some tx ext refund is ignored on proof size because lower than actual measure.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 400,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: Some(Weight::from_parts(100, 300)),
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 20),
+			assert_post_info_weight: Some(Weight::from_parts(80, 300)),
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1080, 1300 + LEN as u64),
+		},
+		// post info doesn't double refund the call and is missing some.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 350,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: Some(Weight::from_parts(60, 200)),
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 20),
+			// 50 are missed in pov because 100 is unspent in post info but it should be only 50.
+			assert_post_info_weight: Some(Weight::from_parts(40, 200)),
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1040, 1250 + LEN as u64),
+		},
+		// post info doesn't double refund the call and is accurate.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 250,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: Some(Weight::from_parts(60, 200)),
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 20),
+			assert_post_info_weight: Some(Weight::from_parts(40, 150)),
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1040, 1150 + LEN as u64),
+		},
+		// post info doesn't double refund the call and is accurate. Even if mock ext is refunding
+		// too much.
+		TestCfg {
+			measured_proof_size_pre_dispatch: 100,
+			measured_proof_size_post_dispatch: 250,
+			info_call_weight: Weight::from_parts(40, 100),
+			info_extension_weight: Weight::from_parts(60, 200),
+			post_info_actual_weight: Some(Weight::from_parts(60, 200)),
+			block_weight_pre_dispatch: Weight::from_parts(1000, 1000),
+			mock_ext_refund: Weight::from_parts(20, 300),
+			assert_post_info_weight: Some(Weight::from_parts(40, 150)),
+			assert_block_weight_post_dispatch: base_extrinsic +
+				Weight::from_parts(1040, 1150 + LEN as u64),
+		},
+	];
+
+	for (i, test) in tests.into_iter().enumerate() {
+		dbg!("test number: ", i);
+		MOCK_EXT_REFUND.with_borrow_mut(|v| *v = test.mock_ext_refund);
+		let mut test_ext = setup_test_externalities(&[
+			test.measured_proof_size_pre_dispatch as usize,
+			test.measured_proof_size_post_dispatch as usize,
+		]);
+
+		test_ext.execute_with(|| {
+			frame_system::BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(test.block_weight_pre_dispatch, DispatchClass::Normal);
+			});
+			// Benchmarked storage weight: 50
+			let info = DispatchInfo {
+				call_weight: test.info_call_weight,
+				extension_weight: test.info_extension_weight,
+				..Default::default()
+			};
+			let mut post_info = PostDispatchInfo {
+				actual_weight: test.post_info_actual_weight,
+				pays_fee: Default::default(),
+			};
+			let tx_ext = new_tx_ext();
+			let (pre, _) = tx_ext
+				.validate_and_prepare(ALICE_ORIGIN.clone().into(), CALL, &info, LEN, 0)
+				.unwrap();
+			assert_ok!(Tx::post_dispatch(pre, &info, &mut post_info, LEN, &Ok(())));
+
+			assert_eq!(post_info.actual_weight, test.assert_post_info_weight);
+			assert_eq!(
+				*frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				test.assert_block_weight_post_dispatch,
+			);
+		})
+	}
+}
+
+#[test]
+fn storage_size_reported_correctly() {
+	let mut test_ext = setup_test_externalities(&[1000]);
+	test_ext.execute_with(|| {
+		assert_eq!(get_proof_size(), Some(1000));
+	});
+
+	let mut test_ext = new_test_ext();
+
+	let test_recorder = TestRecorder::new(&[0]);
+
+	test_ext.register_extension(ProofSizeExt::new(test_recorder));
+
+	test_ext.execute_with(|| {
+		assert_eq!(get_proof_size(), Some(0));
+	});
+}
+
+#[test]
+fn storage_size_disabled_reported_correctly() {
+	let mut test_ext = setup_test_externalities(&[PROOF_RECORDING_DISABLED as usize]);
+
+	test_ext.execute_with(|| {
+		assert_eq!(get_proof_size(), None);
+	});
+}
+
+#[test]
+fn full_basic_refund() {
+	// Settings for the test:
+	let actual_used_proof_size = 200;
+	let check_weight = 100;
+	let storage_weight_reclaim = 100;
+	let mock_ext = 142;
+	let mock_ext_refund = 100;
+
+	// Test execution:
+	CHECK_WEIGHT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(1, check_weight));
+	STORAGE_WEIGHT_RECLAIM_WEIGHT
+		.with_borrow_mut(|v| *v = Weight::from_parts(1, storage_weight_reclaim));
+	MOCK_EXT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(36, mock_ext));
+	MOCK_EXT_REFUND.with_borrow_mut(|v| *v = Weight::from_parts(35, mock_ext_refund));
+
+	let initial_storage_weight = 1212u64;
+
+	let mut test_ext = setup_test_externalities(&[
+		initial_storage_weight as usize,
+		initial_storage_weight as usize + actual_used_proof_size,
+	]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(initial_storage_weight);
+
+		let extrinsic = new_extrinsic();
+		let call_info = extrinsic.function.get_dispatch_info();
+
+		let info = extrinsic.get_dispatch_info();
+		let post_info = extrinsic.apply::<Test>(&info, LEN).unwrap().unwrap();
+
+		// Assertions:
+		assert_eq!(
+			post_info.actual_weight.unwrap().ref_time(),
+			call_info.call_weight.ref_time() + 3,
+		);
+		assert_eq!(
+			post_info.actual_weight.unwrap().proof_size(),
+			// LEN is part of the base extrinsic, not part of the post info's actual weight.
+			actual_used_proof_size as u64,
+		);
+		assert_eq!(
+			get_storage_weight().proof_size(),
+			initial_storage_weight + actual_used_proof_size as u64 + LEN as u64
+		);
+	});
+}
+
+#[test]
+fn full_accrue() {
+	// Settings for the test:
+	let actual_used_proof_size = 400;
+	let check_weight = 100;
+	let storage_weight_reclaim = 100;
+	let mock_ext = 142;
+	let mock_ext_refund = 100;
+
+	// Test execution:
+	CHECK_WEIGHT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(1, check_weight));
+	STORAGE_WEIGHT_RECLAIM_WEIGHT
+		.with_borrow_mut(|v| *v = Weight::from_parts(1, storage_weight_reclaim));
+	MOCK_EXT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(36, mock_ext));
+	MOCK_EXT_REFUND.with_borrow_mut(|v| *v = Weight::from_parts(35, mock_ext_refund));
+
+	let initial_storage_weight = 1212u64;
+
+	let mut test_ext = setup_test_externalities(&[
+		initial_storage_weight as usize,
+		initial_storage_weight as usize + actual_used_proof_size,
+	]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(initial_storage_weight);
+
+		let extrinsic = new_extrinsic();
+		let call_info = extrinsic.function.get_dispatch_info();
+
+		let info = extrinsic.get_dispatch_info();
+		let post_info = extrinsic.apply::<Test>(&info, LEN).unwrap().unwrap();
+
+		// Assertions:
+		assert_eq!(
+			post_info.actual_weight.unwrap().ref_time(),
+			call_info.call_weight.ref_time() + 3,
+		);
+		assert_eq!(
+			post_info.actual_weight.unwrap().proof_size(),
+			info.total_weight().proof_size(), // The post info doesn't get the accrue.
+		);
+		assert_eq!(
+			get_storage_weight().proof_size(),
+			initial_storage_weight + actual_used_proof_size as u64 + LEN as u64
+		);
+	});
+}
+
+#[test]
+fn bare_is_reclaimed() {
+	let mut test_ext = setup_test_externalities(&[]);
+	test_ext.execute_with(|| {
+		let info = DispatchInfo {
+			call_weight: Weight::from_parts(100, 100),
+			extension_weight: Weight::from_parts(100, 100),
+			class: DispatchClass::Normal,
+			pays_fee: Default::default(),
+		};
+		let mut post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(100, 100)),
+			pays_fee: Default::default(),
+		};
+		MOCK_EXT_REFUND.with_borrow_mut(|v| *v = Weight::from_parts(10, 10));
+
+		frame_system::BlockWeight::<Test>::mutate(|current_weight| {
+			current_weight
+				.set(Weight::from_parts(45, 45) + info.total_weight(), DispatchClass::Normal);
+		});
+
+		StorageWeightReclaim::<Test, MockExtensionWithRefund>::bare_post_dispatch(
+			&info,
+			&mut post_info,
+			0,
+			&Ok(()),
+		)
+		.expect("tx is valid");
+
+		assert_eq!(
+			*frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal),
+			Weight::from_parts(45 + 90, 45 + 90),
+		);
+	});
+}
+
+#[test]
+fn sets_to_node_storage_proof_if_higher() {
+	struct TestCfg {
+		initial_proof_size: u64,
+		post_dispatch_proof_size: u64,
+		mock_ext_proof_size: u64,
+		pre_dispatch_block_proof_size: u64,
+		assert_final_block_proof_size: u64,
+	}
+
+	let tests = vec![
+		// The storage proof reported by the proof recorder is higher than what is stored on
+		// the runtime side.
+		TestCfg {
+			initial_proof_size: 1000,
+			post_dispatch_proof_size: 1005,
+			mock_ext_proof_size: 0,
+			pre_dispatch_block_proof_size: 5,
+			// We expect that the storage weight was set to the node-side proof size (1005) +
+			// extrinsics length (150)
+			assert_final_block_proof_size: 1155,
+		},
+		// In this second scenario the proof size on the node side is only lower
+		// after reclaim happened.
+		TestCfg {
+			initial_proof_size: 175,
+			post_dispatch_proof_size: 180,
+			mock_ext_proof_size: 100,
+			pre_dispatch_block_proof_size: 85,
+			// After the pre_dispatch, the BlockWeight proof size will be
+			// 85 (initial) + 100 (benched) + 150 (tx length) = 335
+			//
+			// We expect that the storage weight was set to the node-side proof weight
+			// First we will reclaim 95, which leaves us with 240 BlockWeight.
+			// This is lower than 180 (recorded proof size) + 150 (length).
+			// So we expect it to be set to 330.
+			assert_final_block_proof_size: 330,
+		},
+	];
+
+	for test in tests {
+		let mut test_ext = setup_test_externalities(&[
+			test.initial_proof_size as usize,
+			test.post_dispatch_proof_size as usize,
+		]);
+
+		CHECK_WEIGHT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, 0));
+		STORAGE_WEIGHT_RECLAIM_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, 0));
+		MOCK_EXT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, test.mock_ext_proof_size));
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(test.pre_dispatch_block_proof_size);
+
+			let extrinsic = new_extrinsic();
+			let call_info = extrinsic.function.get_dispatch_info();
+			assert_eq!(call_info.call_weight.proof_size(), 0);
+
+			let info = extrinsic.get_dispatch_info();
+			let _post_info = extrinsic.apply::<Test>(&info, LEN).unwrap().unwrap();
+
+			assert_eq!(get_storage_weight().proof_size(), test.assert_final_block_proof_size);
+		})
+	}
+}
+
+#[test]
+fn test_pov_missing_from_node_reclaim() {
+	// Test scenario: after dispatch the pov size from node side is less than block weight.
+	// Ensure `pov_size_missing_from_node` is calculated correctly, and `ExtrinsicWeightReclaimed`
+	// is updated correctly.
+
+	// Proof size:
+	let bench_pre_dispatch_call = 220;
+	let bench_post_dispatch_actual = 90;
+	let len = 20; // Only one extrinsic in the scenario. So all extrinsics length.
+	let block_pre_dispatch = 100;
+	let missing_from_node = 50;
+	let node_diff = 70;
+
+	let node_pre_dispatch = block_pre_dispatch + missing_from_node;
+	let node_post_dispatch = node_pre_dispatch + node_diff;
+
+	// Initialize the test.
+	let mut test_ext =
+		setup_test_externalities(&[node_pre_dispatch as usize, node_post_dispatch as usize]);
+
+	test_ext.execute_with(|| {
+		set_current_storage_weight(block_pre_dispatch);
+		let info = DispatchInfo {
+			call_weight: Weight::from_parts(0, bench_pre_dispatch_call),
+			extension_weight: Weight::from_parts(0, 0),
+			..Default::default()
+		};
+		let post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(0, bench_post_dispatch_actual)),
+			..Default::default()
+		};
+
+		// Execute the transaction.
+		let tx_ext = StorageWeightReclaim::<Test, frame_system::CheckWeight<Test>>::new(
+			frame_system::CheckWeight::new(),
+		);
+		tx_ext
+			.test_run(ALICE_ORIGIN.clone().into(), CALL, &info, len as usize, 0, |_| Ok(post_info))
+			.expect("valid")
+			.expect("success");
+
+		// Assert the results.
+		assert_eq!(
+			frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal).proof_size(),
+			node_post_dispatch + len,
+		);
+		assert_eq!(
+			frame_system::ExtrinsicWeightReclaimed::<Test>::get().proof_size(),
+			bench_pre_dispatch_call - node_diff - missing_from_node,
+		);
+	});
+}
+
+#[test]
+fn test_ref_time_weight_reclaim() {
+	// Test scenario: after dispatch the time weight is refunded correctly.
+
+	// Time weight:
+	let bench_pre_dispatch_call = 145;
+	let bench_post_dispatch_actual = 104;
+	let bench_mock_ext_weight = 63;
+	let bench_mock_ext_refund = 22;
+	let len = 20; // Only one extrinsic in the scenario. So all extrinsics length.
+	let block_pre_dispatch = 121;
+	let node_pre_dispatch = 0;
+	let node_post_dispatch = 0;
+
+	// Initialize the test.
+	CHECK_WEIGHT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, 0));
+	STORAGE_WEIGHT_RECLAIM_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(0, 0));
+	MOCK_EXT_WEIGHT.with_borrow_mut(|v| *v = Weight::from_parts(bench_mock_ext_weight, 0));
+	MOCK_EXT_REFUND.with_borrow_mut(|v| *v = Weight::from_parts(bench_mock_ext_refund, 0));
+
+	let base_extrinsic = <<Test as frame_system::Config>::BlockWeights as Get<
+		frame_system::limits::BlockWeights,
+	>>::get()
+	.per_class
+	.get(DispatchClass::Normal)
+	.base_extrinsic;
+
+	let mut test_ext =
+		setup_test_externalities(&[node_pre_dispatch as usize, node_post_dispatch as usize]);
+
+	test_ext.execute_with(|| {
+		frame_system::BlockWeight::<Test>::mutate(|current_weight| {
+			current_weight.set(Weight::from_parts(block_pre_dispatch, 0), DispatchClass::Normal);
+		});
+		let info = DispatchInfo {
+			call_weight: Weight::from_parts(bench_pre_dispatch_call, 0),
+			extension_weight: Weight::from_parts(bench_mock_ext_weight, 0),
+			..Default::default()
+		};
+		let post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(bench_post_dispatch_actual, 0)),
+			..Default::default()
+		};
+
+		type InnerTxExt = (frame_system::CheckWeight<Test>, MockExtensionWithRefund);
+		// Execute the transaction.
+		let tx_ext = StorageWeightReclaim::<Test, InnerTxExt>::new((
+			frame_system::CheckWeight::new(),
+			MockExtensionWithRefund,
+		));
+		tx_ext
+			.test_run(ALICE_ORIGIN.clone().into(), CALL, &info, len as usize, 0, |_| Ok(post_info))
+			.expect("valid transaction extension pipeline")
+			.expect("success");
+
+		// Assert the results.
+		assert_eq!(
+			frame_system::BlockWeight::<Test>::get().get(DispatchClass::Normal).ref_time(),
+			block_pre_dispatch +
+				base_extrinsic.ref_time() +
+				bench_post_dispatch_actual +
+				bench_mock_ext_weight -
+				bench_mock_ext_refund,
+		);
+		assert_eq!(
+			frame_system::ExtrinsicWeightReclaimed::<Test>::get().ref_time(),
+			bench_pre_dispatch_call - bench_post_dispatch_actual + bench_mock_ext_refund,
+		);
+	});
+}
+
+#[test]
+fn test_metadata() {
+	assert_eq!(
+		StorageWeightReclaim::<Test, frame_system::CheckWeight<Test>>::metadata()
+			.iter()
+			.map(|m| m.identifier)
+			.collect::<Vec<_>>(),
+		vec!["CheckWeight", "StorageWeightReclaim"]
+	);
+}
diff --git a/cumulus/pallets/weight-reclaim/src/weights.rs b/cumulus/pallets/weight-reclaim/src/weights.rs
new file mode 100644
index 00000000000..e651c8a7831
--- /dev/null
+++ b/cumulus/pallets/weight-reclaim/src/weights.rs
@@ -0,0 +1,74 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-08-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `fedora`, CPU: `13th Gen Intel(R) Core(TM) i7-1360P`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// ./target/release/parachain-template-node
+// benchmark
+// pallet
+// --pallet
+// cumulus-pallet-weight-reclaim
+// --chain
+// dev
+// --output
+// cumulus/pallets/weight-reclaim/src/weights.rs
+// --template
+// substrate/.maintain/frame-weight-template.hbs
+// --extrinsic
+// *
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `cumulus_pallet_weight_reclaim`.
+pub trait WeightInfo {
+	fn storage_weight_reclaim() -> Weight;
+}
+
+/// Weights for `cumulus_pallet_weight_reclaim` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_247_000 picoseconds.
+		Weight::from_parts(2_466_000, 0)
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_247_000 picoseconds.
+		Weight::from_parts(2_466_000, 0)
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
index c954ddb7b8c..abe59a8439a 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
@@ -80,11 +80,11 @@ assets-common = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -112,6 +112,7 @@ runtime-benchmarks = [
 	"assets-common/runtime-benchmarks",
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -151,6 +152,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
@@ -192,11 +194,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 8f4ae4670ac..1db152e39fd 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -182,6 +182,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -958,6 +962,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -1012,18 +1017,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
@@ -1207,6 +1214,7 @@ mod benches {
 		// NOTE: Make sure you point to the individual modules below.
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..c8f9bb7cd56
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=asset-hub-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 7_301_000 picoseconds.
+		Weight::from_parts(7_536_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs
index 182410f20ff..a5c9fea3cdf 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/frame_system_extensions.rs
@@ -16,28 +16,29 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+//! HOSTNAME: `697235d969a1`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --wasm-execution=compiled
+// --extrinsic=*
+// --runtime=target/release/wbuild/asset-hub-rococo-runtime/asset_hub_rococo_runtime.wasm
 // --pallet=frame_system_extensions
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./cumulus/file_header.txt
-// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/
-// --chain=asset-hub-rococo-dev
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +57,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
 		//  Estimated: `3509`
-		// Minimum execution time: 3_637_000 picoseconds.
-		Weight::from_parts(6_382_000, 0)
+		// Minimum execution time: 8_313_000 picoseconds.
+		Weight::from_parts(8_528_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -67,8 +68,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_841_000 picoseconds.
-		Weight::from_parts(8_776_000, 0)
+		// Minimum execution time: 12_527_000 picoseconds.
+		Weight::from_parts(13_006_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -78,8 +79,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_841_000 picoseconds.
-		Weight::from_parts(8_776_000, 0)
+		// Minimum execution time: 12_380_000 picoseconds.
+		Weight::from_parts(12_922_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -87,44 +88,64 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 561_000 picoseconds.
-		Weight::from_parts(2_705_000, 0)
+		// Minimum execution time: 782_000 picoseconds.
+		Weight::from_parts(855_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_316_000 picoseconds.
-		Weight::from_parts(5_771_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 11_743_000 picoseconds.
+		Weight::from_parts(12_067_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 511_000 picoseconds.
-		Weight::from_parts(2_575_000, 0)
+		// Minimum execution time: 644_000 picoseconds.
+		Weight::from_parts(697_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 501_000 picoseconds.
-		Weight::from_parts(2_595_000, 0)
+		// Minimum execution time: 605_000 picoseconds.
+		Weight::from_parts(700_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_687_000 picoseconds.
-		Weight::from_parts(6_192_000, 0)
+		// Minimum execution time: 9_796_000 picoseconds.
+		Weight::from_parts(10_365_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 4_855_000 picoseconds.
+		Weight::from_parts(5_050_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
index 33f111009ed..ae78a56d8b3 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
@@ -16,6 +16,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
index 7c31745d8f6..cb10ae9a480 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
@@ -81,11 +81,11 @@ assets-common = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 pallet-message-queue = { workspace = true }
@@ -114,6 +114,7 @@ runtime-benchmarks = [
 	"assets-common/runtime-benchmarks",
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -155,6 +156,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
@@ -198,11 +200,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 26ef3219a1e..5fb495e4e8c 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -183,6 +183,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -1000,6 +1004,7 @@ construct_runtime!(
 		// RandomnessCollectiveFlip = 2 removed
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -1057,18 +1062,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Default extensions applied to Ethereum transactions.
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -1088,9 +1095,9 @@ impl EthExtra for EthExtraImpl {
 			frame_system::CheckNonce::<Runtime>::from(nonce),
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::from(tip, None),
-			cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::<Runtime>::new(),
 			frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
 		)
+			.into()
 	}
 }
 
@@ -1337,6 +1344,7 @@ mod benches {
 		// NOTE: Make sure you point to the individual modules below.
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..1573a278e24
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=asset-hub-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 7_470_000 picoseconds.
+		Weight::from_parts(7_695_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs
index e8dd9763c28..a1bb92cf700 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/frame_system_extensions.rs
@@ -16,28 +16,29 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
+//! HOSTNAME: `697235d969a1`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --wasm-execution=compiled
+// --extrinsic=*
+// --runtime=target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.wasm
 // --pallet=frame_system_extensions
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./cumulus/file_header.txt
-// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/
-// --chain=asset-hub-westend-dev
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -56,8 +57,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
 		//  Estimated: `3509`
-		// Minimum execution time: 3_206_000 picoseconds.
-		Weight::from_parts(6_212_000, 0)
+		// Minimum execution time: 6_329_000 picoseconds.
+		Weight::from_parts(6_665_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -67,8 +68,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_851_000 picoseconds.
-		Weight::from_parts(8_847_000, 0)
+		// Minimum execution time: 12_110_000 picoseconds.
+		Weight::from_parts(12_883_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -78,8 +79,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_851_000 picoseconds.
-		Weight::from_parts(8_847_000, 0)
+		// Minimum execution time: 12_241_000 picoseconds.
+		Weight::from_parts(12_780_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -87,44 +88,64 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 631_000 picoseconds.
-		Weight::from_parts(3_086_000, 0)
+		// Minimum execution time: 825_000 picoseconds.
+		Weight::from_parts(890_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_446_000 picoseconds.
-		Weight::from_parts(5_911_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 10_159_000 picoseconds.
+		Weight::from_parts(10_461_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 481_000 picoseconds.
-		Weight::from_parts(2_916_000, 0)
+		// Minimum execution time: 578_000 picoseconds.
+		Weight::from_parts(660_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 501_000 picoseconds.
-		Weight::from_parts(2_595_000, 0)
+		// Minimum execution time: 618_000 picoseconds.
+		Weight::from_parts(682_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_927_000 picoseconds.
-		Weight::from_parts(6_613_000, 0)
+		// Minimum execution time: 9_964_000 picoseconds.
+		Weight::from_parts(10_419_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 4_890_000 picoseconds.
+		Weight::from_parts(5_163_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
index b0f986768f4..442b58635f4 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
@@ -15,6 +15,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index 3fabea3b02f..3dba65ae99f 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -72,11 +72,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -151,11 +151,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking/std",
 	"frame-executive/std",
@@ -230,6 +230,7 @@ runtime-benchmarks = [
 	"bridge-runtime-common/runtime-benchmarks",
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -272,6 +273,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 88146cecb9e..35af034310d 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -124,20 +124,22 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	BridgeRejectObsoleteHeadersAndMessages,
-	(bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,),
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		BridgeRejectObsoleteHeadersAndMessages,
+		(bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,),
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -313,6 +315,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -555,6 +561,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -667,6 +674,7 @@ mod benches {
 		[pallet_collator_selection, CollatorSelection]
 		[cumulus_pallet_parachain_system, ParachainSystem]
 		[cumulus_pallet_xcmp_queue, XcmpQueue]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 		// XCM
 		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
 		// NOTE: Make sure you point to the individual modules below.
@@ -1547,41 +1555,44 @@ mod tests {
 		use bp_polkadot_core::SuffixedCommonTransactionExtensionExt;
 
 		sp_io::TestExternalities::default().execute_with(|| {
-            frame_system::BlockHash::<Runtime>::insert(BlockNumber::zero(), Hash::default());
-            let payload: TxExtension = (
-                frame_system::CheckNonZeroSender::new(),
-                frame_system::CheckSpecVersion::new(),
-                frame_system::CheckTxVersion::new(),
-                frame_system::CheckGenesis::new(),
-                frame_system::CheckEra::from(Era::Immortal),
-                frame_system::CheckNonce::from(10),
-                frame_system::CheckWeight::new(),
-                pallet_transaction_payment::ChargeTransactionPayment::from(10),
-                BridgeRejectObsoleteHeadersAndMessages,
-                (
-                    bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),
-                ),
-                frame_metadata_hash_extension::CheckMetadataHash::new(false),
-				cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
-            );
-
-            // for BridgeHubRococo
-            {
-                let bhr_indirect_payload = bp_bridge_hub_rococo::TransactionExtension::from_params(
-                    VERSION.spec_version,
-                    VERSION.transaction_version,
-                    bp_runtime::TransactionEra::Immortal,
-                    System::block_hash(BlockNumber::zero()),
-                    10,
-                    10,
-                    (((), ()), ((), ())),
-                );
-                assert_eq!(payload.encode().split_last().unwrap().1, bhr_indirect_payload.encode());
-                assert_eq!(
-                    TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1,
-                    sp_runtime::traits::TransactionExtension::<RuntimeCall>::implicit(&bhr_indirect_payload).unwrap().encode()
-                )
-            }
-        });
+			frame_system::BlockHash::<Runtime>::insert(BlockNumber::zero(), Hash::default());
+			let payload: TxExtension = (
+				frame_system::CheckNonZeroSender::new(),
+				frame_system::CheckSpecVersion::new(),
+				frame_system::CheckTxVersion::new(),
+				frame_system::CheckGenesis::new(),
+				frame_system::CheckEra::from(Era::Immortal),
+				frame_system::CheckNonce::from(10),
+				frame_system::CheckWeight::new(),
+				pallet_transaction_payment::ChargeTransactionPayment::from(10),
+				BridgeRejectObsoleteHeadersAndMessages,
+				(
+					bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),
+				),
+				frame_metadata_hash_extension::CheckMetadataHash::new(false),
+			).into();
+
+			// for BridgeHubRococo
+			{
+				let bhr_indirect_payload = bp_bridge_hub_rococo::TransactionExtension::from_params(
+					VERSION.spec_version,
+					VERSION.transaction_version,
+					bp_runtime::TransactionEra::Immortal,
+					System::block_hash(BlockNumber::zero()),
+					10,
+					10,
+					(((), ()), ((), ())),
+				);
+				assert_eq!(payload.encode().split_last().unwrap().1, bhr_indirect_payload.encode());
+				assert_eq!(
+					TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1,
+					sp_runtime::traits::TransactionExtension::<RuntimeCall>::implicit(
+						&bhr_indirect_payload
+					)
+					.unwrap()
+					.encode()
+				)
+			}
+		});
 	}
 }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..ca1d8dcbe56
--- /dev/null
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=bridge-hub-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_988_000 picoseconds.
+		Weight::from_parts(7_361_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs
index 64eef1b4f74..93fb6f3bbbe 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/frame_system_extensions.rs
@@ -16,28 +16,26 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
 // --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=frame_system_extensions
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
+// --chain=bridge-hub-rococo-dev
 // --header=./cumulus/file_header.txt
 // --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/
-// --chain=bridge-hub-rococo-dev
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,81 +48,92 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 3_136_000 picoseconds.
-		Weight::from_parts(5_842_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 4_211_000 picoseconds.
+		Weight::from_parts(4_470_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_771_000 picoseconds.
-		Weight::from_parts(8_857_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 8_792_000 picoseconds.
+		Weight::from_parts(9_026_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_771_000 picoseconds.
-		Weight::from_parts(8_857_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 8_700_000 picoseconds.
+		Weight::from_parts(9_142_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 732_000 picoseconds.
-		Weight::from_parts(2_875_000, 0)
+		// Minimum execution time: 487_000 picoseconds.
+		Weight::from_parts(534_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_627_000 picoseconds.
-		Weight::from_parts(6_322_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 6_719_000 picoseconds.
+		Weight::from_parts(6_846_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 471_000 picoseconds.
-		Weight::from_parts(2_455_000, 0)
+		// Minimum execution time: 410_000 picoseconds.
+		Weight::from_parts(442_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 491_000 picoseconds.
-		Weight::from_parts(2_916_000, 0)
+		// Minimum execution time: 390_000 picoseconds.
+		Weight::from_parts(425_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_798_000 picoseconds.
-		Weight::from_parts(6_272_000, 0)
+		// Minimum execution time: 5_965_000 picoseconds.
+		Weight::from_parts(6_291_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 2_738_000 picoseconds.
+		Weight::from_parts(2_915_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs
index 74796e626a2..7a0accf2e7a 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs
@@ -24,6 +24,7 @@ use ::pallet_bridge_relayers::WeightInfo as _;
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
index d5baa1c71df..c40aae5a82a 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs
@@ -184,8 +184,8 @@ fn construct_extrinsic(
 		BridgeRejectObsoleteHeadersAndMessages::default(),
 		(OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),),
 		frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
-	);
+	)
+		.into();
 	let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap();
 	let signature = payload.using_encoded(|e| sender.sign(e));
 	UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), tx_ext)
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
index 8d74b221a60..b0f4366e29c 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs
@@ -63,7 +63,6 @@ fn construct_extrinsic(
 		BridgeRejectObsoleteHeadersAndMessages::default(),
 		(bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),),
 		frame_metadata_hash_extension::CheckMetadataHash::new(false),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
 	)
 		.into();
 	let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap();
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
index 644aa72d131..444023eac72 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml
@@ -72,11 +72,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 
 pallet-collator-selection = { workspace = true }
@@ -148,11 +148,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking/std",
 	"frame-executive/std",
@@ -227,6 +227,7 @@ runtime-benchmarks = [
 	"bridge-runtime-common/runtime-benchmarks",
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -269,6 +270,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index 1ca709f0d8c..2c2e01b4d21 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -120,20 +120,22 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	BridgeRejectObsoleteHeadersAndMessages,
-	(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,),
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		BridgeRejectObsoleteHeadersAndMessages,
+		(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,),
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -299,6 +301,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -532,6 +538,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -622,6 +629,7 @@ mod benches {
 		[snowbridge_pallet_outbound_queue, EthereumOutboundQueue]
 		[snowbridge_pallet_system, EthereumSystem]
 		[snowbridge_pallet_ethereum_client, EthereumBeaconClient]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
@@ -1369,40 +1377,43 @@ mod tests {
 		use bp_polkadot_core::SuffixedCommonTransactionExtensionExt;
 
 		sp_io::TestExternalities::default().execute_with(|| {
-            frame_system::BlockHash::<Runtime>::insert(BlockNumber::zero(), Hash::default());
-            let payload: TxExtension = (
-                frame_system::CheckNonZeroSender::new(),
-                frame_system::CheckSpecVersion::new(),
-                frame_system::CheckTxVersion::new(),
-                frame_system::CheckGenesis::new(),
-                frame_system::CheckEra::from(Era::Immortal),
-                frame_system::CheckNonce::from(10),
-                frame_system::CheckWeight::new(),
-                pallet_transaction_payment::ChargeTransactionPayment::from(10),
-                BridgeRejectObsoleteHeadersAndMessages,
-                (
-                    bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),
-                ),
+			frame_system::BlockHash::<Runtime>::insert(BlockNumber::zero(), Hash::default());
+			let payload: TxExtension = (
+				frame_system::CheckNonZeroSender::new(),
+				frame_system::CheckSpecVersion::new(),
+				frame_system::CheckTxVersion::new(),
+				frame_system::CheckGenesis::new(),
+				frame_system::CheckEra::from(Era::Immortal),
+				frame_system::CheckNonce::from(10),
+				frame_system::CheckWeight::new(),
+				pallet_transaction_payment::ChargeTransactionPayment::from(10),
+				BridgeRejectObsoleteHeadersAndMessages,
+				(
+					bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),
+				),
 				frame_metadata_hash_extension::CheckMetadataHash::new(false),
-                cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
-            );
-
-            {
-                let bh_indirect_payload = bp_bridge_hub_westend::TransactionExtension::from_params(
-                    VERSION.spec_version,
-                    VERSION.transaction_version,
-                    bp_runtime::TransactionEra::Immortal,
-                    System::block_hash(BlockNumber::zero()),
-                    10,
-                    10,
-                    (((), ()), ((), ())),
-                );
-                assert_eq!(payload.encode().split_last().unwrap().1, bh_indirect_payload.encode());
-                assert_eq!(
-                    TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1,
-                    sp_runtime::traits::TransactionExtension::<RuntimeCall>::implicit(&bh_indirect_payload).unwrap().encode()
-                )
-            }
-        });
+			).into();
+
+			{
+				let bh_indirect_payload = bp_bridge_hub_westend::TransactionExtension::from_params(
+					VERSION.spec_version,
+					VERSION.transaction_version,
+					bp_runtime::TransactionEra::Immortal,
+					System::block_hash(BlockNumber::zero()),
+					10,
+					10,
+					(((), ()), ((), ())),
+				);
+				assert_eq!(payload.encode().split_last().unwrap().1, bh_indirect_payload.encode());
+				assert_eq!(
+					TxExtension::implicit(&payload).unwrap().encode().split_last().unwrap().1,
+					sp_runtime::traits::TransactionExtension::<RuntimeCall>::implicit(
+						&bh_indirect_payload
+					)
+					.unwrap()
+					.encode()
+				)
+			}
+		});
 	}
 }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..955b2732545
--- /dev/null
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=bridge-hub-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_810_000 picoseconds.
+		Weight::from_parts(7_250_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs
index 459b137d3b8..21cadac25e1 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/frame_system_extensions.rs
@@ -16,28 +16,26 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
 // --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=frame_system_extensions
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
+// --chain=bridge-hub-westend-dev
 // --header=./cumulus/file_header.txt
 // --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/
-// --chain=bridge-hub-westend-dev
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,81 +48,92 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 3_166_000 picoseconds.
-		Weight::from_parts(6_021_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 4_363_000 picoseconds.
+		Weight::from_parts(4_521_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_651_000 picoseconds.
-		Weight::from_parts(9_177_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 8_522_000 picoseconds.
+		Weight::from_parts(8_847_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_651_000 picoseconds.
-		Weight::from_parts(9_177_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 8_617_000 picoseconds.
+		Weight::from_parts(8_789_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 601_000 picoseconds.
-		Weight::from_parts(2_805_000, 0)
+		// Minimum execution time: 485_000 picoseconds.
+		Weight::from_parts(557_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_727_000 picoseconds.
-		Weight::from_parts(6_051_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 6_682_000 picoseconds.
+		Weight::from_parts(6_821_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 471_000 picoseconds.
-		Weight::from_parts(2_494_000, 0)
+		// Minimum execution time: 390_000 picoseconds.
+		Weight::from_parts(441_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 521_000 picoseconds.
-		Weight::from_parts(2_655_000, 0)
+		// Minimum execution time: 395_000 picoseconds.
+		Weight::from_parts(455_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_808_000 picoseconds.
-		Weight::from_parts(6_402_000, 0)
+		// Minimum execution time: 6_134_000 picoseconds.
+		Weight::from_parts(6_308_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 2_764_000 picoseconds.
+		Weight::from_parts(2_893_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs
index c1c5c337aca..313da55831c 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs
@@ -24,6 +24,7 @@ use ::pallet_bridge_relayers::WeightInfo as _;
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs
index d71400fa71b..bc570ef7f74 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs
@@ -185,8 +185,8 @@ fn construct_extrinsic(
 		BridgeRejectObsoleteHeadersAndMessages::default(),
 		(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),),
 		frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
-	);
+	)
+		.into();
 	let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap();
 	let signature = payload.using_encoded(|e| sender.sign(e));
 	UncheckedExtrinsic::new_signed(call, account_id.into(), Signature::Sr25519(signature), extra)
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
index 9d32f28f4fc..d7e7fbe0c72 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs
@@ -95,7 +95,6 @@ fn construct_extrinsic(
 		BridgeRejectObsoleteHeadersAndMessages::default(),
 		(bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),),
 		frame_metadata_hash_extension::CheckMetadataHash::new(false),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(),
 	)
 		.into();
 	let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap();
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
index 9c70b65060d..2786321e48e 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
@@ -77,11 +77,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-message-queue = { workspace = true }
 
@@ -103,6 +103,7 @@ default = ["std"]
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -143,6 +144,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
@@ -182,11 +184,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index d3cd285ba67..e9adc4d1eae 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -191,6 +191,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -669,6 +673,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -735,16 +740,19 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+	),
+>;
+
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
@@ -806,6 +814,7 @@ mod benches {
 		[pallet_salary, AmbassadorSalary]
 		[pallet_treasury, FellowshipTreasury]
 		[pallet_asset_rate, AssetRate]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..c286ba13202
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_745_000 picoseconds.
+		Weight::from_parts(6_948_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs
index f32f2730313..8c2abcd4e8c 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system_extensions.rs
@@ -16,28 +16,26 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
 // --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
 // --pallet=frame_system_extensions
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
+// --chain=collectives-westend-dev
 // --header=./cumulus/file_header.txt
 // --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/
-// --chain=collectives-westend-dev
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,81 +48,92 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 3_497_000 picoseconds.
-		Weight::from_parts(5_961_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 4_206_000 picoseconds.
+		Weight::from_parts(4_485_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_240_000 picoseconds.
-		Weight::from_parts(8_175_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 7_537_000 picoseconds.
+		Weight::from_parts(7_706_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_240_000 picoseconds.
-		Weight::from_parts(8_175_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Estimated: `0`
+		// Minimum execution time: 7_512_000 picoseconds.
+		Weight::from_parts(7_655_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 671_000 picoseconds.
-		Weight::from_parts(3_005_000, 0)
+		// Minimum execution time: 447_000 picoseconds.
+		Weight::from_parts(499_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_426_000 picoseconds.
-		Weight::from_parts(6_131_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 6_667_000 picoseconds.
+		Weight::from_parts(6_868_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 501_000 picoseconds.
-		Weight::from_parts(2_715_000, 0)
+		// Minimum execution time: 389_000 picoseconds.
+		Weight::from_parts(420_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 491_000 picoseconds.
-		Weight::from_parts(2_635_000, 0)
+		// Minimum execution time: 379_000 picoseconds.
+		Weight::from_parts(420_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
 	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `System::BlockWeight` (r:1 w:1)
 	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `24`
 		//  Estimated: `1533`
-		// Minimum execution time: 3_958_000 picoseconds.
-		Weight::from_parts(6_753_000, 0)
+		// Minimum execution time: 6_330_000 picoseconds.
+		Weight::from_parts(6_605_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `1533`
+		// Minimum execution time: 2_784_000 picoseconds.
+		Weight::from_parts(2_960_000, 0)
 			.saturating_add(Weight::from_parts(0, 1533))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
index 00b3bd92d5e..a1663dc98a3 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
@@ -15,6 +15,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
index cb0655d70cf..067c4df3b53 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml
@@ -70,11 +70,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-message-queue = { workspace = true }
 
@@ -90,11 +90,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
@@ -148,6 +148,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -179,6 +180,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index be369565dba..3348a635df0 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -88,17 +88,19 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
@@ -201,6 +203,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = ();
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -402,6 +408,7 @@ construct_runtime!(
 		RandomnessCollectiveFlip: pallet_insecure_randomness_collective_flip = 2,
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -448,6 +455,7 @@ mod benches {
 		[cumulus_pallet_parachain_system, ParachainSystem]
 		[pallet_contracts, Contracts]
 		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
index 2b5fab32929..668b4cc6c7b 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/Cargo.toml
@@ -71,11 +71,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -92,11 +92,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
@@ -154,6 +154,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -186,6 +187,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index c4d43e4361f..e9171c79afa 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -99,18 +99,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -221,6 +223,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -622,6 +628,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -672,6 +679,7 @@ mod benches {
 		// NOTE: Make sure you point to the individual modules below.
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..6298fd6e7f6
--- /dev/null
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=coretime-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_638_000 picoseconds.
+		Weight::from_parts(6_806_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs
index a4d09696a1a..04b695b5769 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/frame_system_extensions.rs
@@ -129,4 +129,18 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 3_687_000 picoseconds.
+		Weight::from_parts(6_192_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs
index 24c4f50e6ab..7fee4a728b9 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/mod.rs
@@ -19,6 +19,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
index 03df782bc26..915926ff989 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/Cargo.toml
@@ -70,11 +70,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 
 pallet-collator-selection = { workspace = true }
@@ -92,11 +92,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
@@ -152,6 +152,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -183,6 +184,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index 431bfc8a63b..975856b3b6f 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -99,18 +99,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -221,6 +223,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -617,6 +623,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 3,
 		ParachainInfo: parachain_info = 4,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 5,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -664,6 +671,7 @@ mod benches {
 		// NOTE: Make sure you point to the individual modules below.
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..55d52f4b04c
--- /dev/null
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=coretime-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 6_658_000 picoseconds.
+		Weight::from_parts(6_905_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs
index d928b73613a..9527e0c5549 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/frame_system_extensions.rs
@@ -129,4 +129,18 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 3_687_000 picoseconds.
+		Weight::from_parts(6_192_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs
index 24c4f50e6ab..7fee4a728b9 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/mod.rs
@@ -19,6 +19,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
index 763f8abea34..75f45297fe2 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs
@@ -300,6 +300,7 @@ pub type TxExtension = (
 	frame_system::CheckEra<Runtime>,
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs
index 4fbbb8d6f78..db9a14e2cf2 100644
--- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system_extensions.rs
@@ -16,28 +16,30 @@
 
 //! Autogenerated weights for `frame_system_extensions`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024
+//! HOSTNAME: `697235d969a1`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --wasm-execution=compiled
+// --extrinsic=*
+// --runtime=target/release/wbuild/glutton-westend-runtime/glutton_westend_runtime.wasm
 // --pallet=frame_system_extensions
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./cumulus/file_header.txt
-// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/
-// --chain=glutton-westend-dev-1300
+// --no-median-slopes
+// --genesis-builder-policy=none
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -54,10 +56,10 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `54`
+		//  Measured:  `0`
 		//  Estimated: `3509`
-		// Minimum execution time: 3_908_000 picoseconds.
-		Weight::from_parts(4_007_000, 0)
+		// Minimum execution time: 2_572_000 picoseconds.
+		Weight::from_parts(2_680_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -65,10 +67,10 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
+		//  Measured:  `0`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_510_000 picoseconds.
-		Weight::from_parts(6_332_000, 0)
+		// Minimum execution time: 5_818_000 picoseconds.
+		Weight::from_parts(6_024_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -76,10 +78,10 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
+		//  Measured:  `14`
 		//  Estimated: `3509`
-		// Minimum execution time: 5_510_000 picoseconds.
-		Weight::from_parts(6_332_000, 0)
+		// Minimum execution time: 7_364_000 picoseconds.
+		Weight::from_parts(7_676_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -87,44 +89,52 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 651_000 picoseconds.
-		Weight::from_parts(851_000, 0)
+		// Minimum execution time: 657_000 picoseconds.
+		Weight::from_parts(686_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`)
 	fn check_nonce() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 3_387_000 picoseconds.
-		Weight::from_parts(3_646_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Estimated: `3529`
+		// Minimum execution time: 6_931_000 picoseconds.
+		Weight::from_parts(7_096_000, 0)
+			.saturating_add(Weight::from_parts(0, 3529))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	fn check_spec_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 491_000 picoseconds.
-		Weight::from_parts(651_000, 0)
+		// Minimum execution time: 518_000 picoseconds.
+		Weight::from_parts(539_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 451_000 picoseconds.
-		Weight::from_parts(662_000, 0)
+		// Minimum execution time: 530_000 picoseconds.
+		Weight::from_parts(550_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
-	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `24`
-		//  Estimated: `1489`
-		// Minimum execution time: 3_537_000 picoseconds.
-		Weight::from_parts(4_208_000, 0)
-			.saturating_add(Weight::from_parts(0, 1489))
-			.saturating_add(T::DbWeight::get().reads(1))
-			.saturating_add(T::DbWeight::get().writes(1))
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 5_691_000 picoseconds.
+		Weight::from_parts(5_955_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 3_249_000 picoseconds.
+		Weight::from_parts(3_372_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 }
diff --git a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
index de2898046c0..6391f8c3eeb 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-rococo/Cargo.toml
@@ -68,11 +68,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -89,11 +89,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"enumflags2/std",
 	"frame-benchmarking?/std",
@@ -150,6 +150,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -182,6 +183,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
index ef3c90ace82..ffdd86c500e 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
@@ -92,17 +92,19 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The TransactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -196,6 +198,10 @@ impl frame_system::Config for Runtime {
 	type MultiBlockMigrator = MultiBlockMigrations;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -567,6 +573,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -626,6 +633,7 @@ mod benches {
 		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..439855f8571
--- /dev/null
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=people-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 7_097_000 picoseconds.
+		Weight::from_parts(7_419_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs
index fb2b69e23e8..3f12b25540e 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/frame_system_extensions.rs
@@ -129,4 +129,18 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 3_687_000 picoseconds.
+		Weight::from_parts(6_192_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 }
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs
index fab3c629ab3..81906a11fe1 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/mod.rs
@@ -17,6 +17,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
index 65bc8264934..fae0fd2e333 100644
--- a/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/people/people-westend/Cargo.toml
@@ -68,11 +68,11 @@ xcm-runtime-apis = { workspace = true }
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 cumulus-pallet-session-benchmarking = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
@@ -89,11 +89,11 @@ std = [
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
 	"cumulus-pallet-session-benchmarking/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"enumflags2/std",
 	"frame-benchmarking?/std",
@@ -150,6 +150,7 @@ std = [
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
@@ -182,6 +183,7 @@ runtime-benchmarks = [
 try-runtime = [
 	"cumulus-pallet-aura-ext/try-runtime",
 	"cumulus-pallet-parachain-system/try-runtime",
+	"cumulus-pallet-weight-reclaim/try-runtime",
 	"cumulus-pallet-xcm/try-runtime",
 	"cumulus-pallet-xcmp-queue/try-runtime",
 	"frame-executive/try-runtime",
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
index ebf8fcb33bd..ee6b0db55b9 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
@@ -92,17 +92,19 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 pub type BlockId = generic::BlockId<Block>;
 
 /// The transactionExtension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -195,6 +197,10 @@ impl frame_system::Config for Runtime {
 	type MultiBlockMigrator = MultiBlockMigrations;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo<Runtime>;
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -566,6 +572,7 @@ construct_runtime!(
 		ParachainSystem: cumulus_pallet_parachain_system = 1,
 		Timestamp: pallet_timestamp = 2,
 		ParachainInfo: parachain_info = 3,
+		WeightReclaim: cumulus_pallet_weight_reclaim = 4,
 
 		// Monetary stuff.
 		Balances: pallet_balances = 10,
@@ -624,6 +631,7 @@ mod benches {
 		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
 		[pallet_xcm_benchmarks::fungible, XcmBalances]
 		[pallet_xcm_benchmarks::generic, XcmGeneric]
+		[cumulus_pallet_weight_reclaim, WeightReclaim]
 	);
 }
 
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_weight_reclaim.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_weight_reclaim.rs
new file mode 100644
index 00000000000..fd3018ec974
--- /dev/null
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_weight_reclaim.rs
@@ -0,0 +1,67 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `cumulus_pallet_weight_reclaim`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=cumulus_pallet_weight_reclaim
+// --chain=people-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `cumulus_pallet_weight_reclaim`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo<T> {
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	/// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1)
+	/// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:0)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn storage_weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 7_006_000 picoseconds.
+		Weight::from_parts(7_269_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(3))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+}
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs
index 0a4b9e8e268..422b8566ad0 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/frame_system_extensions.rs
@@ -129,4 +129,18 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::BlockWeight` (r:1 w:1)
+	/// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1533`
+		// Minimum execution time: 3_687_000 picoseconds.
+		Weight::from_parts(6_192_000, 0)
+			.saturating_add(Weight::from_parts(0, 1533))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 }
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs
index fab3c629ab3..81906a11fe1 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/mod.rs
@@ -17,6 +17,7 @@
 
 pub mod block_weights;
 pub mod cumulus_pallet_parachain_system;
+pub mod cumulus_pallet_weight_reclaim;
 pub mod cumulus_pallet_xcmp_queue;
 pub mod extrinsic_weights;
 pub mod frame_system;
diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
index 51dc95bf2c7..38ddf3bc199 100644
--- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs
@@ -140,6 +140,7 @@ pub type TxExtension = (
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
 	pallet_asset_tx_payment::ChargeAssetTxPayment<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
index e8761445f16..826a2e9764f 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml
@@ -51,12 +51,12 @@ xcm-executor = { workspace = true }
 # Cumulus
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-pallet-xcm = { workspace = true }
 cumulus-pallet-xcmp-queue = { workspace = true }
 cumulus-ping = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 cumulus-primitives-utility = { workspace = true }
 pallet-message-queue = { workspace = true }
 parachain-info = { workspace = true }
@@ -72,12 +72,12 @@ std = [
 	"codec/std",
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-pallet-xcm/std",
 	"cumulus-pallet-xcmp-queue/std",
 	"cumulus-ping/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"cumulus-primitives-utility/std",
 	"frame-benchmarking?/std",
 	"frame-executive/std",
@@ -117,6 +117,7 @@ std = [
 ]
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-primitives-utility/runtime-benchmarks",
diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
index 42556e0b493..89cd17d5450 100644
--- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
+++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs
@@ -226,6 +226,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = ();
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
@@ -617,6 +621,7 @@ construct_runtime! {
 		Timestamp: pallet_timestamp,
 		Sudo: pallet_sudo,
 		TransactionPayment: pallet_transaction_payment,
+		WeightReclaim: cumulus_pallet_weight_reclaim,
 
 		ParachainSystem: cumulus_pallet_parachain_system = 20,
 		ParachainInfo: parachain_info = 21,
@@ -657,17 +662,20 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
+
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
index 5cbe662e270..62ff6081190 100644
--- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs
+++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
@@ -100,15 +100,30 @@ pub fn get_proof_size() -> Option<u64> {
 	(proof_size != PROOF_RECORDING_DISABLED).then_some(proof_size)
 }
 
-/// Storage weight reclaim mechanism.
-///
-/// This extension checks the size of the node-side storage proof
-/// before and after executing a given extrinsic. The difference between
-/// benchmarked and spent weight can be reclaimed.
-#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
-#[scale_info(skip_type_params(T))]
-pub struct StorageWeightReclaim<T: Config + Send + Sync>(PhantomData<T>);
+// Encapsulate into a mod so that macro generated code doesn't trigger a warning about deprecated
+// usage.
+#[allow(deprecated)]
+mod allow_deprecated {
+	use super::*;
+
+	/// Storage weight reclaim mechanism.
+	///
+	/// This extension checks the size of the node-side storage proof
+	/// before and after executing a given extrinsic. The difference between
+	/// benchmarked and spent weight can be reclaimed.
+	#[deprecated(note = "This extension doesn't provide accurate reclaim for storage intensive \
+		transaction extension pipeline; it ignores the validation and preparation of extensions prior \
+		to itself and ignores the post dispatch logic for extensions subsequent to itself, it also \
+		doesn't provide weight information. \
+		Use `StorageWeightReclaim` in the `cumulus-pallet-weight-reclaim` crate")]
+	#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
+	#[scale_info(skip_type_params(T))]
+	pub struct StorageWeightReclaim<T: Config + Send + Sync>(pub(super) PhantomData<T>);
+}
+#[allow(deprecated)]
+pub use allow_deprecated::StorageWeightReclaim;
 
+#[allow(deprecated)]
 impl<T: Config + Send + Sync> StorageWeightReclaim<T> {
 	/// Create a new `StorageWeightReclaim` instance.
 	pub fn new() -> Self {
@@ -116,6 +131,7 @@ impl<T: Config + Send + Sync> StorageWeightReclaim<T> {
 	}
 }
 
+#[allow(deprecated)]
 impl<T: Config + Send + Sync> core::fmt::Debug for StorageWeightReclaim<T> {
 	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
 		let _ = write!(f, "StorageWeightReclaim");
@@ -123,6 +139,7 @@ impl<T: Config + Send + Sync> core::fmt::Debug for StorageWeightReclaim<T> {
 	}
 }
 
+#[allow(deprecated)]
 impl<T: Config + Send + Sync> TransactionExtension<T::RuntimeCall> for StorageWeightReclaim<T>
 where
 	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs
index ab83762cc0d..379b39afee0 100644
--- a/cumulus/primitives/storage-weight-reclaim/src/tests.rs
+++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs
@@ -74,6 +74,7 @@ fn get_storage_weight() -> PerDispatchClass<Weight> {
 }
 
 #[test]
+#[allow(deprecated)]
 fn basic_refund() {
 	// The real cost will be 100 bytes of storage size
 	let mut test_ext = setup_test_externalities(&[0, 100]);
@@ -109,6 +110,7 @@ fn basic_refund() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn underestimating_refund() {
 	// We fixed a bug where `pre dispatch info weight > consumed weight > post info weight`
 	// resulted in error.
@@ -149,6 +151,7 @@ fn underestimating_refund() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn sets_to_node_storage_proof_if_higher() {
 	// The storage proof reported by the proof recorder is higher than what is stored on
 	// the runtime side.
@@ -240,6 +243,7 @@ fn sets_to_node_storage_proof_if_higher() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn does_nothing_without_extension() {
 	let mut test_ext = new_test_ext();
 
@@ -274,6 +278,7 @@ fn does_nothing_without_extension() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn negative_refund_is_added_to_weight() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -310,6 +315,7 @@ fn negative_refund_is_added_to_weight() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_zero_proof_size() {
 	let mut test_ext = setup_test_externalities(&[0, 0]);
 
@@ -340,6 +346,7 @@ fn test_zero_proof_size() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_larger_pre_dispatch_proof_size() {
 	let mut test_ext = setup_test_externalities(&[300, 100]);
 
@@ -374,6 +381,7 @@ fn test_larger_pre_dispatch_proof_size() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_incorporates_check_weight_unspent_weight() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -415,6 +423,7 @@ fn test_incorporates_check_weight_unspent_weight() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_incorporates_check_weight_unspent_weight_on_negative() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -456,6 +465,7 @@ fn test_incorporates_check_weight_unspent_weight_on_negative() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_nothing_relcaimed() {
 	let mut test_ext = setup_test_externalities(&[0, 100]);
 
@@ -505,6 +515,7 @@ fn test_nothing_relcaimed() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_incorporates_check_weight_unspent_weight_reverse_order() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -548,6 +559,7 @@ fn test_incorporates_check_weight_unspent_weight_reverse_order() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() {
 	let mut test_ext = setup_test_externalities(&[100, 300]);
 
@@ -616,6 +628,7 @@ fn storage_size_disabled_reported_correctly() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_reclaim_helper() {
 	let mut test_ext = setup_test_externalities(&[1000, 1300, 1800]);
 
@@ -635,6 +648,7 @@ fn test_reclaim_helper() {
 }
 
 #[test]
+#[allow(deprecated)]
 fn test_reclaim_helper_does_not_reclaim_negative() {
 	// Benchmarked weight does not change at all
 	let mut test_ext = setup_test_externalities(&[1000, 1300]);
@@ -669,6 +683,7 @@ fn get_benched_weight() -> Weight {
 /// Just here for doc purposes
 fn do_work() {}
 
+#[allow(deprecated)]
 #[docify::export_content(simple_reclaimer_example)]
 fn reclaim_with_weight_meter() {
 	let mut remaining_weight_meter = WeightMeter::with_limit(Weight::from_parts(10, 10));
diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml
index 2c72ca98f35..f64ee832ace 100644
--- a/cumulus/test/client/Cargo.toml
+++ b/cumulus/test/client/Cargo.toml
@@ -39,16 +39,17 @@ polkadot-parachain-primitives = { workspace = true, default-features = true }
 polkadot-primitives = { workspace = true, default-features = true }
 
 # Cumulus
+cumulus-pallet-weight-reclaim = { workspace = true, default-features = true }
 cumulus-primitives-core = { workspace = true, default-features = true }
 cumulus-primitives-parachain-inherent = { workspace = true, default-features = true }
 cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true }
 cumulus-test-relay-sproof-builder = { workspace = true, default-features = true }
 cumulus-test-runtime = { workspace = true }
 cumulus-test-service = { workspace = true }
 
 [features]
 runtime-benchmarks = [
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-test-service/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs
index 26cf02b3dea..7861a42372a 100644
--- a/cumulus/test/client/src/lib.rs
+++ b/cumulus/test/client/src/lib.rs
@@ -143,7 +143,6 @@ pub fn generate_extrinsic_with_pair(
 		frame_system::CheckNonce::<Runtime>::from(nonce),
 		frame_system::CheckWeight::<Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::<Runtime>::new(),
 	)
 		.into();
 
@@ -152,7 +151,7 @@ pub fn generate_extrinsic_with_pair(
 	let raw_payload = SignedPayload::from_raw(
 		function.clone(),
 		tx_ext.clone(),
-		((), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()),
+		((), VERSION.spec_version, genesis_block, current_block_hash, (), (), ()),
 	);
 	let signature = raw_payload.using_encoded(|e| origin.sign(e));
 
diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml
index 150838e5e96..4cc4f483c02 100644
--- a/cumulus/test/runtime/Cargo.toml
+++ b/cumulus/test/runtime/Cargo.toml
@@ -44,9 +44,9 @@ sp-version = { workspace = true }
 # Cumulus
 cumulus-pallet-aura-ext = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true }
 cumulus-primitives-aura = { workspace = true }
 cumulus-primitives-core = { workspace = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true }
 pallet-collator-selection = { workspace = true }
 parachain-info = { workspace = true }
 
@@ -59,9 +59,9 @@ std = [
 	"codec/std",
 	"cumulus-pallet-aura-ext/std",
 	"cumulus-pallet-parachain-system/std",
+	"cumulus-pallet-weight-reclaim/std",
 	"cumulus-primitives-aura/std",
 	"cumulus-primitives-core/std",
-	"cumulus-primitives-storage-weight-reclaim/std",
 	"frame-executive/std",
 	"frame-support/std",
 	"frame-system-rpc-runtime-api/std",
diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs
index 4abc10276af..01ce3427c1f 100644
--- a/cumulus/test/runtime/src/lib.rs
+++ b/cumulus/test/runtime/src/lib.rs
@@ -232,6 +232,10 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = ();
+}
+
 parameter_types! {
 	pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
 	pub const PotId: PalletId = PalletId(*b"PotStake");
@@ -347,6 +351,7 @@ construct_runtime! {
 		Glutton: pallet_glutton,
 		Aura: pallet_aura,
 		AuraExt: cumulus_pallet_aura_ext,
+		WeightReclaim: cumulus_pallet_weight_reclaim,
 	}
 }
 
@@ -377,16 +382,18 @@ pub type SignedBlock = generic::SignedBlock<Block>;
 /// BlockId type as expected by this runtime.
 pub type BlockId = generic::BlockId<Block>;
 /// The extension to the basic transaction logic.
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	),
+>;
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
 	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml
index b3d92444c7d..79400753262 100644
--- a/cumulus/test/service/Cargo.toml
+++ b/cumulus/test/service/Cargo.toml
@@ -81,8 +81,8 @@ cumulus-client-parachain-inherent = { workspace = true, default-features = true
 cumulus-client-pov-recovery = { workspace = true, default-features = true }
 cumulus-client-service = { workspace = true, default-features = true }
 cumulus-pallet-parachain-system = { workspace = true }
+cumulus-pallet-weight-reclaim = { workspace = true, default-features = true }
 cumulus-primitives-core = { workspace = true, default-features = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true }
 cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true }
 cumulus-relay-chain-interface = { workspace = true, default-features = true }
 cumulus-relay-chain-minimal-node = { workspace = true, default-features = true }
@@ -107,6 +107,7 @@ substrate-test-utils = { workspace = true }
 [features]
 runtime-benchmarks = [
 	"cumulus-pallet-parachain-system/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim/runtime-benchmarks",
 	"cumulus-primitives-core/runtime-benchmarks",
 	"cumulus-test-client/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs
index 2c13d20333a..f3f04cbb638 100644
--- a/cumulus/test/service/src/lib.rs
+++ b/cumulus/test/service/src/lib.rs
@@ -976,13 +976,12 @@ pub fn construct_extrinsic(
 		frame_system::CheckNonce::<runtime::Runtime>::from(nonce),
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(tip),
-		cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::<runtime::Runtime>::new(),
 	)
 		.into();
 	let raw_payload = runtime::SignedPayload::from_raw(
 		function.clone(),
 		tx_ext.clone(),
-		((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()),
+		((), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), ()),
 	);
 	let signature = raw_payload.using_encoded(|e| caller.sign(e));
 	runtime::UncheckedExtrinsic::new_signed(
diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml
index a856e94f42b..f526c07796e 100644
--- a/docs/sdk/Cargo.toml
+++ b/docs/sdk/Cargo.toml
@@ -68,8 +68,8 @@ substrate-wasm-builder = { workspace = true, default-features = true }
 cumulus-client-service = { workspace = true, default-features = true }
 cumulus-pallet-aura-ext = { workspace = true, default-features = true }
 cumulus-pallet-parachain-system = { workspace = true, default-features = true }
+cumulus-pallet-weight-reclaim = { workspace = true, default-features = true }
 cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true }
-cumulus-primitives-storage-weight-reclaim = { workspace = true, default-features = true }
 parachain-info = { workspace = true, default-features = true }
 
 # Omni Node
diff --git a/docs/sdk/src/guides/enable_pov_reclaim.rs b/docs/sdk/src/guides/enable_pov_reclaim.rs
index cb6960b3df4..71abeacd18c 100644
--- a/docs/sdk/src/guides/enable_pov_reclaim.rs
+++ b/docs/sdk/src/guides/enable_pov_reclaim.rs
@@ -62,8 +62,10 @@
 //!
 //! In your runtime, you will find a list of TransactionExtensions.
 //! To enable the reclaiming,
-//! add [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim)
-//! to that list. For maximum efficiency, make sure that `StorageWeightReclaim` is last in the list.
+//! set [`StorageWeightReclaim`](cumulus_pallet_weight_reclaim::StorageWeightReclaim)
+//! as a wrapper of that list.
+//! It is necessary that this extension wraps all the other transaction extensions in order to catch
+//! the whole PoV size of the transactions.
 //! The extension will check the size of the storage proof before and after an extrinsic execution.
 //! It reclaims the difference between the calculated size and the benchmarked size.
 #![doc = docify::embed!("../../templates/parachain/runtime/src/lib.rs", template_signed_extra)]
diff --git a/docs/sdk/src/reference_docs/transaction_extensions.rs b/docs/sdk/src/reference_docs/transaction_extensions.rs
index 0f8198e8372..fe213458b25 100644
--- a/docs/sdk/src/reference_docs/transaction_extensions.rs
+++ b/docs/sdk/src/reference_docs/transaction_extensions.rs
@@ -47,9 +47,11 @@
 //!   to include the so-called metadata hash. This is required by chains to support the generic
 //!   Ledger application and other similar offline wallets.
 //!
-//! - [`StorageWeightReclaim`](cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim): A
-//!   transaction extension for parachains that reclaims unused storage weight after executing a
-//!   transaction.
+//! - [`WeightReclaim`](frame_system::WeightReclaim): A transaction extension for the relay chain
+//!   that reclaims unused weight after executing a transaction.
+//!
+//! - [`StorageWeightReclaim`](cumulus_pallet_weight_reclaim::StorageWeightReclaim): A transaction
+//!   extension for parachains that reclaims unused storage weight after executing a transaction.
 //!
 //! For more information about these extensions, follow the link to the type documentation.
 //!
diff --git a/polkadot/node/service/src/benchmarking.rs b/polkadot/node/service/src/benchmarking.rs
index 0cf16edc03c..5b814a22d2f 100644
--- a/polkadot/node/service/src/benchmarking.rs
+++ b/polkadot/node/service/src/benchmarking.rs
@@ -155,6 +155,7 @@ fn westend_sign_call(
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
 		frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
+		frame_system::WeightReclaim::<runtime::Runtime>::new(),
 	)
 		.into();
 
@@ -171,6 +172,7 @@ fn westend_sign_call(
 			(),
 			(),
 			None,
+			(),
 		),
 	);
 
@@ -210,6 +212,7 @@ fn rococo_sign_call(
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
 		frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
+		frame_system::WeightReclaim::<runtime::Runtime>::new(),
 	)
 		.into();
 
@@ -226,6 +229,7 @@ fn rococo_sign_call(
 			(),
 			(),
 			None,
+			(),
 		),
 	);
 
diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs
index f34bb62a7cf..75fd0d9af30 100644
--- a/polkadot/node/test/service/src/lib.rs
+++ b/polkadot/node/test/service/src/lib.rs
@@ -423,6 +423,7 @@ pub fn construct_extrinsic(
 		frame_system::CheckNonce::<Runtime>::from(nonce),
 		frame_system::CheckWeight::<Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+		frame_system::WeightReclaim::<Runtime>::new(),
 	)
 		.into();
 	let raw_payload = SignedPayload::from_raw(
@@ -437,6 +438,7 @@ pub fn construct_extrinsic(
 			(),
 			(),
 			(),
+			(),
 		),
 	);
 	let signature = raw_payload.using_encoded(|e| caller.sign(e));
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index 4034f8bc143..cab4394eb5a 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -674,6 +674,7 @@ where
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
 			frame_metadata_hash_extension::CheckMetadataHash::new(true),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		)
 			.into();
 		let raw_payload = SignedPayload::new(call, tx_ext)
@@ -1617,6 +1618,7 @@ pub type TxExtension = (
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
diff --git a/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs
index 99dac1ba75f..88596a37cc0 100644
--- a/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs
+++ b/polkadot/runtime/rococo/src/weights/frame_system_extensions.rs
@@ -17,25 +17,23 @@
 //! Autogenerated weights for `frame_system_extensions`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
 // benchmark
 // pallet
-// --chain=rococo-dev
 // --steps=50
 // --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=frame_system_extensions
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=frame_system_extensions
+// --chain=rococo-dev
 // --header=./polkadot/file_header.txt
 // --output=./polkadot/runtime/rococo/src/weights/
 
@@ -50,45 +48,36 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 3_262_000 picoseconds.
-		Weight::from_parts(3_497_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `30`
+		//  Estimated: `0`
+		// Minimum execution time: 3_528_000 picoseconds.
+		Weight::from_parts(3_657_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_416_000 picoseconds.
-		Weight::from_parts(5_690_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `68`
+		//  Estimated: `0`
+		// Minimum execution time: 6_456_000 picoseconds.
+		Weight::from_parts(6_706_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 5_416_000 picoseconds.
-		Weight::from_parts(5_690_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `68`
+		//  Estimated: `0`
+		// Minimum execution time: 6_210_000 picoseconds.
+		Weight::from_parts(6_581_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 471_000 picoseconds.
-		Weight::from_parts(552_000, 0)
+		// Minimum execution time: 529_000 picoseconds.
+		Weight::from_parts(561_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::Account` (r:1 w:1)
@@ -97,8 +86,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 4_847_000 picoseconds.
-		Weight::from_parts(5_091_000, 0)
+		// Minimum execution time: 6_935_000 picoseconds.
+		Weight::from_parts(7_264_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -107,28 +96,32 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 388_000 picoseconds.
-		Weight::from_parts(421_000, 0)
+		// Minimum execution time: 452_000 picoseconds.
+		Weight::from_parts(474_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 378_000 picoseconds.
-		Weight::from_parts(440_000, 0)
+		// Minimum execution time: 422_000 picoseconds.
+		Weight::from_parts(460_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
-	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `24`
-		//  Estimated: `1489`
-		// Minimum execution time: 3_402_000 picoseconds.
-		Weight::from_parts(3_627_000, 0)
-			.saturating_add(Weight::from_parts(0, 1489))
-			.saturating_add(T::DbWeight::get().reads(1))
-			.saturating_add(T::DbWeight::get().writes(1))
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 3_632_000 picoseconds.
+		Weight::from_parts(3_784_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_209_000 picoseconds.
+		Weight::from_parts(2_335_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 }
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index d4031f7ac57..82564d5c278 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -443,6 +443,7 @@ where
 			frame_system::CheckNonce::<Runtime>::from(nonce),
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		)
 			.into();
 		let raw_payload = SignedPayload::new(call, tx_ext)
@@ -834,6 +835,7 @@ pub type TxExtension = (
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index cd8eb4d2505..166f3fc42ee 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -923,6 +923,7 @@ where
 			frame_system::CheckWeight::<Runtime>::new(),
 			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
 			frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(true),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		)
 			.into();
 		let raw_payload = SignedPayload::new(call, tx_ext)
@@ -1814,6 +1815,7 @@ pub type TxExtension = (
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 parameter_types! {
diff --git a/polkadot/runtime/westend/src/weights/frame_system_extensions.rs b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs
index 048f23fbcb9..75f4f6d00b5 100644
--- a/polkadot/runtime/westend/src/weights/frame_system_extensions.rs
+++ b/polkadot/runtime/westend/src/weights/frame_system_extensions.rs
@@ -17,24 +17,25 @@
 //! Autogenerated weights for `frame_system_extensions`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-09-12, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/debug/polkadot
+// target/production/polkadot
 // benchmark
 // pallet
-// --steps=2
-// --repeat=2
+// --steps=50
+// --repeat=20
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --pallet=frame-system-extensions
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=frame_system_extensions
 // --chain=westend-dev
-// --output=./polkadot/runtime/westend/src/weights/
 // --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,45 +48,36 @@ use core::marker::PhantomData;
 /// Weight functions for `frame_system_extensions`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_genesis() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `54`
-		//  Estimated: `3509`
-		// Minimum execution time: 75_764_000 picoseconds.
-		Weight::from_parts(85_402_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `30`
+		//  Estimated: `0`
+		// Minimum execution time: 3_357_000 picoseconds.
+		Weight::from_parts(3_484_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_mortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 118_233_000 picoseconds.
-		Weight::from_parts(126_539_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `68`
+		//  Estimated: `0`
+		// Minimum execution time: 6_242_000 picoseconds.
+		Weight::from_parts(6_566_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::BlockHash` (r:1 w:0)
-	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn check_mortality_immortal_transaction() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
-		//  Estimated: `3509`
-		// Minimum execution time: 118_233_000 picoseconds.
-		Weight::from_parts(126_539_000, 0)
-			.saturating_add(Weight::from_parts(0, 3509))
-			.saturating_add(T::DbWeight::get().reads(1))
+		//  Measured:  `68`
+		//  Estimated: `0`
+		// Minimum execution time: 6_268_000 picoseconds.
+		Weight::from_parts(6_631_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_non_zero_sender() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_885_000 picoseconds.
-		Weight::from_parts(12_784_000, 0)
+		// Minimum execution time: 567_000 picoseconds.
+		Weight::from_parts(617_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	/// Storage: `System::Account` (r:1 w:1)
@@ -94,8 +86,8 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 104_237_000 picoseconds.
-		Weight::from_parts(110_910_000, 0)
+		// Minimum execution time: 6_990_000 picoseconds.
+		Weight::from_parts(7_343_000, 0)
 			.saturating_add(Weight::from_parts(0, 3593))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -104,28 +96,32 @@ impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 6_141_000 picoseconds.
-		Weight::from_parts(11_502_000, 0)
+		// Minimum execution time: 422_000 picoseconds.
+		Weight::from_parts(475_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
 	fn check_tx_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 6_192_000 picoseconds.
-		Weight::from_parts(11_481_000, 0)
+		// Minimum execution time: 434_000 picoseconds.
+		Weight::from_parts(519_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 	}
-	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
-	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn check_weight() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `24`
-		//  Estimated: `1489`
-		// Minimum execution time: 87_616_000 picoseconds.
-		Weight::from_parts(93_607_000, 0)
-			.saturating_add(Weight::from_parts(0, 1489))
-			.saturating_add(T::DbWeight::get().reads(1))
-			.saturating_add(T::DbWeight::get().writes(1))
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 3_524_000 picoseconds.
+		Weight::from_parts(3_706_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+	}
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_216_000 picoseconds.
+		Weight::from_parts(2_337_000, 0)
+			.saturating_add(Weight::from_parts(0, 0))
 	}
 }
diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs
index 26ea226313f..6ebf6476f7e 100644
--- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs
@@ -37,6 +37,7 @@ pub type TxExtension = (
 	frame_system::CheckMortality<Test>,
 	frame_system::CheckNonce<Test>,
 	frame_system::CheckWeight<Test>,
+	frame_system::WeightReclaim<Test>,
 );
 pub type Address = sp_runtime::MultiAddress<AccountId, AccountIndex>;
 pub type UncheckedExtrinsic =
diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
index fb5d1ae7c0e..56a77094f17 100644
--- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
+++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
@@ -60,7 +60,8 @@ construct_runtime! {
 	}
 }
 
-pub type TxExtension = (frame_system::CheckWeight<TestRuntime>,);
+pub type TxExtension =
+	(frame_system::CheckWeight<TestRuntime>, frame_system::WeightReclaim<TestRuntime>);
 
 // we only use the hash type from this, so using the mock should be fine.
 pub(crate) type Extrinsic = sp_runtime::generic::UncheckedExtrinsic<
diff --git a/prdoc/pr_6140.prdoc b/prdoc/pr_6140.prdoc
new file mode 100644
index 00000000000..7e2bd3802cd
--- /dev/null
+++ b/prdoc/pr_6140.prdoc
@@ -0,0 +1,95 @@
+title: Accurate weight reclaim with frame_system::WeightReclaim and cumulus `StorageWeightReclaim` transaction extensions
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Since the introduction of transaction extensions, the transaction extension weight is no longer part of the base extrinsic weight. As a consequence, some transaction extension weight is missed when calculating post dispatch weight and reclaiming unused block weight.
+
+      For solo chains, in order to reclaim the weight accurately, the `frame_system::WeightReclaim` transaction extension must be used at the end of the transaction extension pipeline.
+
+      For parachains, `StorageWeightReclaim` in `cumulus-primitives-storage-weight-reclaim` is deprecated.
+      A new transaction extension `StorageWeightReclaim` in `cumulus-pallet-weight-reclaim` is introduced.
+      `StorageWeightReclaim` is meant to be used as a wrapper around the whole transaction extension pipeline, and will take into account all proof size accurately.
+
+      The new wrapping transaction extension is used like this:
+      ```rust
+      /// The TransactionExtension to the basic transaction logic.
+      pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+             Runtime,
+             (
+                     frame_system::CheckNonZeroSender<Runtime>,
+                     frame_system::CheckSpecVersion<Runtime>,
+                     frame_system::CheckTxVersion<Runtime>,
+                     frame_system::CheckGenesis<Runtime>,
+                     frame_system::CheckEra<Runtime>,
+                     frame_system::CheckNonce<Runtime>,
+                     pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+                     BridgeRejectObsoleteHeadersAndMessages,
+                     (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,),
+                     frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+                     frame_system::CheckWeight<Runtime>,
+             ),
+      >;
+      ```
+
+      NOTE: prior to transaction extensions, `StorageWeightReclaim` also missed some of the proof size used by other transaction extensions prior to itself. This is also fixed by the wrapping `StorageWeightReclaim`.
+
+crates:
+- name: cumulus-primitives-storage-weight-reclaim
+  bump: minor
+- name: sp-runtime
+  bump: patch
+- name: polkadot-sdk
+  bump: minor
+- name: asset-hub-rococo-runtime
+  bump: major
+- name: asset-hub-westend-runtime
+  bump: major
+- name: bridge-hub-rococo-runtime
+  bump: major
+- name: bridge-hub-westend-runtime
+  bump: major
+- name: collectives-westend-runtime
+  bump: major
+- name: coretime-rococo-runtime
+  bump: major
+- name: coretime-westend-runtime
+  bump: major
+- name: people-rococo-runtime
+  bump: major
+- name: people-westend-runtime
+  bump: major
+- name: contracts-rococo-runtime
+  bump: major
+- name: frame-support
+  bump: minor
+- name: frame-executive
+  bump: patch
+- name: frame-system
+  bump: major
+- name: staging-xcm-builder
+  bump: patch
+- name: xcm-runtime-apis
+  bump: patch
+- name: cumulus-pallet-weight-reclaim
+  bump: major
+- name: polkadot-service
+  bump: major
+- name: westend-runtime
+  bump: major
+- name: frame-metadata-hash-extension
+  bump: patch
+- name: frame-system-benchmarking
+  bump: major
+- name: polkadot-sdk-frame
+  bump: major
+- name: rococo-runtime
+  bump: major
+- name: cumulus-pov-validator
+  bump: patch
+- name: penpal-runtime
+  bump: major
+- name: glutton-westend-runtime
+  bump: major
+- name: rococo-parachain-runtime
+  bump: major
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index 5f6806c235f..e531097dbb5 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -138,6 +138,7 @@ pub fn create_extrinsic(
 				>::from(tip, None),
 			),
 			frame_metadata_hash_extension::CheckMetadataHash::new(false),
+			frame_system::WeightReclaim::<kitchensink_runtime::Runtime>::new(),
 		);
 
 	let raw_payload = kitchensink_runtime::SignedPayload::from_raw(
@@ -153,6 +154,7 @@ pub fn create_extrinsic(
 			(),
 			(),
 			None,
+			(),
 		),
 	);
 	let signature = raw_payload.using_encoded(|e| sender.sign(e));
@@ -1060,6 +1062,7 @@ mod tests {
 				let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from(
 					pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None),
 				);
+				let weight_reclaim = frame_system::WeightReclaim::new();
 				let metadata_hash = frame_metadata_hash_extension::CheckMetadataHash::new(false);
 				let tx_ext: TxExtension = (
 					check_non_zero_sender,
@@ -1071,6 +1074,7 @@ mod tests {
 					check_weight,
 					tx_payment,
 					metadata_hash,
+					weight_reclaim,
 				);
 				let raw_payload = SignedPayload::from_raw(
 					function,
@@ -1085,6 +1089,7 @@ mod tests {
 						(),
 						(),
 						None,
+						(),
 					),
 				);
 				let signature = raw_payload.using_encoded(|payload| signer.sign(payload));
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 45ae378cc00..93b134e8165 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -1532,6 +1532,7 @@ where
 				),
 			),
 			frame_metadata_hash_extension::CheckMetadataHash::new(false),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		);
 
 		let raw_payload = SignedPayload::new(call, tx_ext)
@@ -2674,6 +2675,7 @@ pub type TxExtension = (
 		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment<Runtime>,
 	>,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -2695,6 +2697,7 @@ impl EthExtra for EthExtraImpl {
 			pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::from(tip, None)
 				.into(),
 			frame_metadata_hash_extension::CheckMetadataHash::<Runtime>::new(false),
+			frame_system::WeightReclaim::<Runtime>::new(),
 		)
 	}
 }
diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs
index e5b0299f01a..08d6ad6dcc3 100644
--- a/substrate/bin/node/testing/src/keyring.rs
+++ b/substrate/bin/node/testing/src/keyring.rs
@@ -86,6 +86,7 @@ pub fn tx_ext(nonce: Nonce, extra_fee: Balance) -> TxExtension {
 			pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None),
 		),
 		frame_metadata_hash_extension::CheckMetadataHash::new(false),
+		frame_system::WeightReclaim::new(),
 	)
 }
 
diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs
index 3841b010325..882d875f3d8 100644
--- a/substrate/frame/executive/src/tests.rs
+++ b/substrate/frame/executive/src/tests.rs
@@ -335,6 +335,9 @@ impl frame_system::ExtensionsWeightInfo for MockExtensionsWeights {
 	fn check_weight() -> Weight {
 		Weight::from_parts(10, 0)
 	}
+	fn weight_reclaim() -> Weight {
+		Weight::zero()
+	}
 }
 
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
@@ -452,6 +455,7 @@ type TxExtension = (
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 type UncheckedXt = sp_runtime::generic::UncheckedExtrinsic<
 	u64,
@@ -560,6 +564,7 @@ fn tx_ext(nonce: u64, fee: Balance) -> TxExtension {
 		frame_system::CheckNonce::from(nonce),
 		frame_system::CheckWeight::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::from(fee),
+		frame_system::WeightReclaim::new(),
 	)
 		.into()
 }
diff --git a/substrate/frame/metadata-hash-extension/src/tests.rs b/substrate/frame/metadata-hash-extension/src/tests.rs
index 11a3345ee15..7a6966f4629 100644
--- a/substrate/frame/metadata-hash-extension/src/tests.rs
+++ b/substrate/frame/metadata-hash-extension/src/tests.rs
@@ -144,6 +144,7 @@ mod docs {
 			// Add the `CheckMetadataHash` extension.
 			// The position in this list is not important, so we could also add it to beginning.
 			frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+			frame_system::WeightReclaim<Runtime>,
 		);
 
 		/// In your runtime this will be your real address type.
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index b3e340cbcbf..b0338b68231 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -495,6 +495,7 @@ pub mod runtime {
 			frame_system::CheckEra<T>,
 			frame_system::CheckNonce<T>,
 			frame_system::CheckWeight<T>,
+			frame_system::WeightReclaim<T>,
 		);
 	}
 
diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs
index 483a3dce77f..99099683003 100644
--- a/substrate/frame/support/src/dispatch.rs
+++ b/substrate/frame/support/src/dispatch.rs
@@ -308,6 +308,19 @@ impl PostDispatchInfo {
 	/// Calculate how much weight was actually spent by the `Dispatchable`.
 	pub fn calc_actual_weight(&self, info: &DispatchInfo) -> Weight {
 		if let Some(actual_weight) = self.actual_weight {
+			let info_total_weight = info.total_weight();
+			if actual_weight.any_gt(info_total_weight) {
+				log::error!(
+					target: crate::LOG_TARGET,
+					"Post dispatch weight is greater than pre dispatch weight. \
+					Pre dispatch weight may underestimating the actual weight. \
+					Greater post dispatch weight components are ignored.
+					Pre dispatch weight: {:?},
+					Post dispatch weight: {:?}",
+					actual_weight,
+					info_total_weight,
+				);
+			}
 			actual_weight.min(info.total_weight())
 		} else {
 			info.total_weight()
diff --git a/substrate/frame/system/benchmarking/src/extensions.rs b/substrate/frame/system/benchmarking/src/extensions.rs
index 01e4687bc4b..25d6ea03557 100644
--- a/substrate/frame/system/benchmarking/src/extensions.rs
+++ b/substrate/frame/system/benchmarking/src/extensions.rs
@@ -29,7 +29,7 @@ use frame_support::{
 use frame_system::{
 	pallet_prelude::*, CheckGenesis, CheckMortality, CheckNonZeroSender, CheckNonce,
 	CheckSpecVersion, CheckTxVersion, CheckWeight, Config, ExtensionsWeightInfo, Pallet as System,
-	RawOrigin,
+	RawOrigin, WeightReclaim,
 };
 use sp_runtime::{
 	generic::Era,
@@ -254,5 +254,49 @@ mod benchmarks {
 		Ok(())
 	}
 
+	#[benchmark]
+	fn weight_reclaim() -> Result<(), BenchmarkError> {
+		let caller = account("caller", 0, 0);
+		let base_extrinsic = <T as frame_system::Config>::BlockWeights::get()
+			.get(DispatchClass::Normal)
+			.base_extrinsic;
+		let extension_weight = <T as frame_system::Config>::ExtensionsWeightInfo::weight_reclaim();
+		let info = DispatchInfo {
+			call_weight: Weight::from_parts(base_extrinsic.ref_time() * 5, 0),
+			extension_weight,
+			class: DispatchClass::Normal,
+			..Default::default()
+		};
+		let call: T::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into();
+		let post_info = PostDispatchInfo {
+			actual_weight: Some(Weight::from_parts(base_extrinsic.ref_time() * 2, 0)),
+			pays_fee: Default::default(),
+		};
+		let len = 0_usize;
+		let ext = WeightReclaim::<T>::new();
+
+		let initial_block_weight = Weight::from_parts(base_extrinsic.ref_time() * 2, 0);
+		frame_system::BlockWeight::<T>::mutate(|current_weight| {
+			current_weight.set(Weight::zero(), DispatchClass::Mandatory);
+			current_weight.set(initial_block_weight, DispatchClass::Normal);
+			current_weight.accrue(base_extrinsic + info.total_weight(), DispatchClass::Normal);
+		});
+
+		#[block]
+		{
+			ext.test_run(RawOrigin::Signed(caller).into(), &call, &info, len, 0, |_| Ok(post_info))
+				.unwrap()
+				.unwrap();
+		}
+
+		assert_eq!(
+			System::<T>::block_weight().total(),
+			initial_block_weight +
+				base_extrinsic +
+				post_info.actual_weight.unwrap().saturating_add(extension_weight),
+		);
+		Ok(())
+	}
+
 	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,);
 }
diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs
index 6b126619ce5..61b5b885ec6 100644
--- a/substrate/frame/system/benchmarking/src/mock.rs
+++ b/substrate/frame/system/benchmarking/src/mock.rs
@@ -65,6 +65,10 @@ impl frame_system::ExtensionsWeightInfo for MockWeights {
 	fn check_weight() -> Weight {
 		Weight::from_parts(10, 0)
 	}
+
+	fn weight_reclaim() -> Weight {
+		Weight::from_parts(10, 0)
+	}
 }
 
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs
index ee91478b90f..de0303defd0 100644
--- a/substrate/frame/system/src/extensions/check_weight.rs
+++ b/substrate/frame/system/src/extensions/check_weight.rs
@@ -135,30 +135,12 @@ where
 		Ok(())
 	}
 
+	#[deprecated(note = "Use `frame_system::Pallet::reclaim_weight` instead.")]
 	pub fn do_post_dispatch(
 		info: &DispatchInfoOf<T::RuntimeCall>,
 		post_info: &PostDispatchInfoOf<T::RuntimeCall>,
 	) -> Result<(), TransactionValidityError> {
-		let unspent = post_info.calc_unspent(info);
-		if unspent.any_gt(Weight::zero()) {
-			crate::BlockWeight::<T>::mutate(|current_weight| {
-				current_weight.reduce(unspent, info.class);
-			})
-		}
-
-		log::trace!(
-			target: LOG_TARGET,
-			"Used block weight: {:?}",
-			crate::BlockWeight::<T>::get(),
-		);
-
-		log::trace!(
-			target: LOG_TARGET,
-			"Used block length: {:?}",
-			Pallet::<T>::all_extrinsics_len(),
-		);
-
-		Ok(())
+		crate::Pallet::<T>::reclaim_weight(info, post_info)
 	}
 }
 
@@ -279,8 +261,7 @@ where
 		_len: usize,
 		_result: &DispatchResult,
 	) -> Result<Weight, TransactionValidityError> {
-		Self::do_post_dispatch(info, post_info)?;
-		Ok(Weight::zero())
+		crate::Pallet::<T>::reclaim_weight(info, post_info).map(|()| Weight::zero())
 	}
 
 	fn bare_validate(
@@ -306,7 +287,7 @@ where
 		_len: usize,
 		_result: &DispatchResult,
 	) -> Result<(), TransactionValidityError> {
-		Self::do_post_dispatch(info, post_info)
+		crate::Pallet::<T>::reclaim_weight(info, post_info)
 	}
 }
 
@@ -744,6 +725,121 @@ mod tests {
 		})
 	}
 
+	#[test]
+	fn extrinsic_already_refunded_more_precisely() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let accurate_refund = Weight::from_parts(510, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(Weight::zero(), DispatchClass::Mandatory);
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+			});
+
+			// Validate and prepare extrinsic
+			let pre = CheckWeight::<Test>(PhantomData)
+				.validate_and_prepare(Some(1).into(), CALL, &info, len, 0)
+				.unwrap()
+				.0;
+
+			assert_eq!(
+				BlockWeight::<Test>::get().total(),
+				info.total_weight() + prior_block_weight + base_extrinsic
+			);
+
+			// Refund more accurately than the benchmark
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.reduce(accurate_refund, DispatchClass::Normal);
+			});
+			crate::ExtrinsicWeightReclaimed::<Test>::put(accurate_refund);
+
+			// Do the post dispatch
+			assert_ok!(CheckWeight::<Test>::post_dispatch_details(
+				pre,
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund is used
+			assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), accurate_refund);
+			assert_eq!(
+				BlockWeight::<Test>::get().total(),
+				info.total_weight() - accurate_refund + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn extrinsic_already_refunded_less_precisely() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let inaccurate_refund = Weight::from_parts(110, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(Weight::zero(), DispatchClass::Mandatory);
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+			});
+
+			// Validate and prepare extrinsic
+			let pre = CheckWeight::<Test>(PhantomData)
+				.validate_and_prepare(Some(1).into(), CALL, &info, len, 0)
+				.unwrap()
+				.0;
+
+			assert_eq!(
+				BlockWeight::<Test>::get().total(),
+				info.total_weight() + prior_block_weight + base_extrinsic
+			);
+
+			// Refund less accurately than the benchmark
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.reduce(inaccurate_refund, DispatchClass::Normal);
+			});
+			crate::ExtrinsicWeightReclaimed::<Test>::put(inaccurate_refund);
+
+			// Do the post dispatch
+			assert_ok!(CheckWeight::<Test>::post_dispatch_details(
+				pre,
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				BlockWeight::<Test>::get().total(),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
 	#[test]
 	fn zero_weight_extrinsic_still_has_base_weight() {
 		new_test_ext().execute_with(|| {
diff --git a/substrate/frame/system/src/extensions/mod.rs b/substrate/frame/system/src/extensions/mod.rs
index d79104d2240..66a8b17d30a 100644
--- a/substrate/frame/system/src/extensions/mod.rs
+++ b/substrate/frame/system/src/extensions/mod.rs
@@ -22,6 +22,7 @@ pub mod check_nonce;
 pub mod check_spec_version;
 pub mod check_tx_version;
 pub mod check_weight;
+pub mod weight_reclaim;
 pub mod weights;
 
 pub use weights::WeightInfo;
diff --git a/substrate/frame/system/src/extensions/weight_reclaim.rs b/substrate/frame/system/src/extensions/weight_reclaim.rs
new file mode 100644
index 00000000000..0c37422a843
--- /dev/null
+++ b/substrate/frame/system/src/extensions/weight_reclaim.rs
@@ -0,0 +1,401 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::Config;
+use codec::{Decode, Encode};
+use frame_support::dispatch::{DispatchInfo, PostDispatchInfo};
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{
+		DispatchInfoOf, Dispatchable, PostDispatchInfoOf, TransactionExtension, ValidateResult,
+	},
+	transaction_validity::{TransactionSource, TransactionValidityError, ValidTransaction},
+	DispatchResult,
+};
+use sp_weights::Weight;
+
+/// Reclaim the unused weight using the post dispatch information.
+///
+/// After the dispatch of the extrinsic, calculate the unused weight using the post dispatch
+/// information and update the block consumed weight according to the new calculated extrinsic
+/// weight.
+#[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct WeightReclaim<T: Config + Send + Sync>(core::marker::PhantomData<T>);
+
+impl<T: Config + Send + Sync> WeightReclaim<T>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	/// Creates new `TransactionExtension` to recalculate the extrinsic weight after dispatch.
+	pub fn new() -> Self {
+		Self(Default::default())
+	}
+}
+
+impl<T: Config + Send + Sync> TransactionExtension<T::RuntimeCall> for WeightReclaim<T>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	const IDENTIFIER: &'static str = "WeightReclaim";
+	type Implicit = ();
+	type Pre = ();
+	type Val = ();
+
+	fn weight(&self, _: &T::RuntimeCall) -> Weight {
+		<T::ExtensionsWeightInfo as super::WeightInfo>::weight_reclaim()
+	}
+
+	fn validate(
+		&self,
+		origin: T::RuntimeOrigin,
+		_call: &T::RuntimeCall,
+		_info: &DispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+		_self_implicit: Self::Implicit,
+		_inherited_implication: &impl Encode,
+		_source: TransactionSource,
+	) -> ValidateResult<Self::Val, T::RuntimeCall> {
+		Ok((ValidTransaction::default(), (), origin))
+	}
+
+	fn prepare(
+		self,
+		_val: Self::Val,
+		_origin: &T::RuntimeOrigin,
+		_call: &T::RuntimeCall,
+		_info: &DispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		Ok(())
+	}
+
+	fn post_dispatch_details(
+		_pre: Self::Pre,
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &PostDispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<Weight, TransactionValidityError> {
+		crate::Pallet::<T>::reclaim_weight(info, post_info).map(|()| Weight::zero())
+	}
+
+	fn bare_validate(
+		_call: &T::RuntimeCall,
+		_info: &DispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+	) -> frame_support::pallet_prelude::TransactionValidity {
+		Ok(ValidTransaction::default())
+	}
+
+	fn bare_validate_and_prepare(
+		_call: &T::RuntimeCall,
+		_info: &DispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+	) -> Result<(), TransactionValidityError> {
+		Ok(())
+	}
+
+	fn bare_post_dispatch(
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &mut PostDispatchInfoOf<T::RuntimeCall>,
+		_len: usize,
+		_result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		crate::Pallet::<T>::reclaim_weight(info, post_info)
+	}
+}
+
+impl<T: Config + Send + Sync> core::fmt::Debug for WeightReclaim<T>
+where
+	T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+{
+	fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+		write!(f, "{}", Self::IDENTIFIER)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::{
+		mock::{new_test_ext, Test},
+		BlockWeight, DispatchClass,
+	};
+	use frame_support::{assert_ok, weights::Weight};
+
+	fn block_weights() -> crate::limits::BlockWeights {
+		<Test as crate::Config>::BlockWeights::get()
+	}
+
+	#[test]
+	fn extrinsic_already_refunded_more_precisely() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let accurate_refund = Weight::from_parts(510, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+				current_weight.accrue(
+					base_extrinsic + info.total_weight() - accurate_refund,
+					DispatchClass::Normal,
+				);
+			});
+			crate::ExtrinsicWeightReclaimed::<Test>::put(accurate_refund);
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund is used
+			assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), accurate_refund);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				info.total_weight() - accurate_refund + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn extrinsic_already_refunded_less_precisely() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let inaccurate_refund = Weight::from_parts(110, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+				current_weight.accrue(
+					base_extrinsic + info.total_weight() - inaccurate_refund,
+					DispatchClass::Normal,
+				);
+			});
+			crate::ExtrinsicWeightReclaimed::<Test>::put(inaccurate_refund);
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn extrinsic_not_refunded_before() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+				current_weight.accrue(base_extrinsic + info.total_weight(), DispatchClass::Normal);
+			});
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn no_actual_post_dispatch_weight() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info =
+				DispatchInfo { call_weight: Weight::from_parts(512, 0), ..Default::default() };
+			let post_info = PostDispatchInfo { actual_weight: None, pays_fee: Default::default() };
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Normal);
+				current_weight.accrue(base_extrinsic + info.total_weight(), DispatchClass::Normal);
+			});
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Normal),
+				info.total_weight() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn different_dispatch_class() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info = DispatchInfo {
+				call_weight: Weight::from_parts(512, 0),
+				class: DispatchClass::Operational,
+				..Default::default()
+			};
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Operational).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Operational);
+				current_weight
+					.accrue(base_extrinsic + info.total_weight(), DispatchClass::Operational);
+			});
+
+			// Do the post dispatch
+			assert_ok!(WeightReclaim::<Test>::post_dispatch_details(
+				(),
+				&info,
+				&post_info,
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Operational),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+
+	#[test]
+	fn bare_also_works() {
+		new_test_ext().execute_with(|| {
+			// This is half of the max block weight
+			let info = DispatchInfo {
+				call_weight: Weight::from_parts(512, 0),
+				class: DispatchClass::Operational,
+				..Default::default()
+			};
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(128, 0)),
+				pays_fee: Default::default(),
+			};
+			let prior_block_weight = Weight::from_parts(64, 0);
+			let len = 0_usize;
+			let base_extrinsic = block_weights().get(DispatchClass::Operational).base_extrinsic;
+
+			// Set initial info
+			BlockWeight::<Test>::mutate(|current_weight| {
+				current_weight.set(prior_block_weight, DispatchClass::Operational);
+				current_weight
+					.accrue(base_extrinsic + info.total_weight(), DispatchClass::Operational);
+			});
+
+			// Do the bare post dispatch
+			assert_ok!(WeightReclaim::<Test>::bare_post_dispatch(
+				&info,
+				&mut post_info.clone(),
+				len,
+				&Ok(())
+			));
+
+			// Ensure the accurate refund from benchmark is used
+			assert_eq!(
+				crate::ExtrinsicWeightReclaimed::<Test>::get(),
+				post_info.calc_unspent(&info)
+			);
+			assert_eq!(
+				*BlockWeight::<Test>::get().get(DispatchClass::Operational),
+				post_info.actual_weight.unwrap() + prior_block_weight + base_extrinsic
+			);
+		})
+	}
+}
diff --git a/substrate/frame/system/src/extensions/weights.rs b/substrate/frame/system/src/extensions/weights.rs
index b3c296899be..670bb9a0e6f 100644
--- a/substrate/frame/system/src/extensions/weights.rs
+++ b/substrate/frame/system/src/extensions/weights.rs
@@ -59,6 +59,7 @@ pub trait WeightInfo {
 	fn check_spec_version() -> Weight;
 	fn check_tx_version() -> Weight;
 	fn check_weight() -> Weight;
+	fn weight_reclaim() -> Weight;
 }
 
 /// Weights for `frame_system_extensions` using the Substrate node and recommended hardware.
@@ -133,6 +134,17 @@ impl<T: crate::Config> WeightInfo for SubstrateWeight<T> {
 		// Minimum execution time: 2_887_000 picoseconds.
 		Weight::from_parts(3_006_000, 0)
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1489`
+		// Minimum execution time: 4_375_000 picoseconds.
+		Weight::from_parts(4_747_000, 1489)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
 }
 
 // For backwards compatibility and tests.
@@ -206,4 +218,15 @@ impl WeightInfo for () {
 		// Minimum execution time: 2_887_000 picoseconds.
 		Weight::from_parts(3_006_000, 0)
 	}
+	/// Storage: `System::AllExtrinsicsLen` (r:1 w:1)
+	/// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	fn weight_reclaim() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `24`
+		//  Estimated: `1489`
+		// Minimum execution time: 4_375_000 picoseconds.
+		Weight::from_parts(4_747_000, 1489)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
 }
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 862fb4cf9fa..4fc69c8755f 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -146,6 +146,10 @@ use frame_support::{
 };
 use scale_info::TypeInfo;
 use sp_core::storage::well_known_keys;
+use sp_runtime::{
+	traits::{DispatchInfoOf, PostDispatchInfoOf},
+	transaction_validity::TransactionValidityError,
+};
 use sp_weights::{RuntimeDbWeight, Weight};
 
 #[cfg(any(feature = "std", test))]
@@ -170,7 +174,7 @@ pub use extensions::{
 	check_genesis::CheckGenesis, check_mortality::CheckMortality,
 	check_non_zero_sender::CheckNonZeroSender, check_nonce::CheckNonce,
 	check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion,
-	check_weight::CheckWeight, WeightInfo as ExtensionsWeightInfo,
+	check_weight::CheckWeight, weight_reclaim::WeightReclaim, WeightInfo as ExtensionsWeightInfo,
 };
 // Backward compatible re-export.
 pub use extensions::check_mortality::CheckMortality as CheckEra;
@@ -1039,6 +1043,17 @@ pub mod pallet {
 	pub(super) type AuthorizedUpgrade<T: Config> =
 		StorageValue<_, CodeUpgradeAuthorization<T>, OptionQuery>;
 
+	/// The weight reclaimed for the extrinsic.
+	///
+	/// This information is available until the end of the extrinsic execution.
+	/// More precisely this information is removed in `note_applied_extrinsic`.
+	///
+	/// Logic doing some post dispatch weight reduction must update this storage to avoid duplicate
+	/// reduction.
+	#[pallet::storage]
+	#[pallet::whitelist_storage]
+	pub type ExtrinsicWeightReclaimed<T: Config> = StorageValue<_, Weight, ValueQuery>;
+
 	#[derive(frame_support::DefaultNoBound)]
 	#[pallet::genesis_config]
 	pub struct GenesisConfig<T: Config> {
@@ -2073,10 +2088,23 @@ impl<T: Config> Pallet<T> {
 			},
 		});
 
+		log::trace!(
+			target: LOG_TARGET,
+			"Used block weight: {:?}",
+			BlockWeight::<T>::get(),
+		);
+
+		log::trace!(
+			target: LOG_TARGET,
+			"Used block length: {:?}",
+			Pallet::<T>::all_extrinsics_len(),
+		);
+
 		let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32;
 
 		storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index);
 		ExecutionPhase::<T>::put(Phase::ApplyExtrinsic(next_extrinsic_index));
+		ExtrinsicWeightReclaimed::<T>::kill();
 	}
 
 	/// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block
@@ -2174,6 +2202,32 @@ impl<T: Config> Pallet<T> {
 		}
 		Ok(actual_hash)
 	}
+
+	/// Reclaim the weight for the extrinsic given info and post info.
+	///
+	/// This function will check the already reclaimed weight, and reclaim more if the
+	/// difference between pre dispatch and post dispatch weight is higher.
+	pub fn reclaim_weight(
+		info: &DispatchInfoOf<T::RuntimeCall>,
+		post_info: &PostDispatchInfoOf<T::RuntimeCall>,
+	) -> Result<(), TransactionValidityError>
+	where
+		T::RuntimeCall: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+	{
+		let already_reclaimed = crate::ExtrinsicWeightReclaimed::<T>::get();
+		let unspent = post_info.calc_unspent(info);
+		let accurate_reclaim = already_reclaimed.max(unspent);
+		// Saturation never happens, we took the maximum above.
+		let to_reclaim_more = accurate_reclaim.saturating_sub(already_reclaimed);
+		if to_reclaim_more != Weight::zero() {
+			crate::BlockWeight::<T>::mutate(|current_weight| {
+				current_weight.reduce(to_reclaim_more, info.class);
+			});
+			crate::ExtrinsicWeightReclaimed::<T>::put(accurate_reclaim);
+		}
+
+		Ok(())
+	}
 }
 
 /// Returns a 32 byte datum which is guaranteed to be universally unique. `entropy` is provided
diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs
index 6b903f5b7e7..6415380b284 100644
--- a/substrate/frame/system/src/tests.rs
+++ b/substrate/frame/system/src/tests.rs
@@ -892,3 +892,67 @@ fn test_default_account_nonce() {
 		assert_eq!(System::account_nonce(&1), 5u64.into());
 	});
 }
+
+#[test]
+fn extrinsic_weight_refunded_is_cleaned() {
+	new_test_ext().execute_with(|| {
+		crate::ExtrinsicWeightReclaimed::<Test>::put(Weight::from_parts(1, 2));
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(1, 2));
+		System::note_applied_extrinsic(&Ok(().into()), Default::default());
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::zero());
+
+		crate::ExtrinsicWeightReclaimed::<Test>::put(Weight::from_parts(1, 2));
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(1, 2));
+		System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.into()), Default::default());
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::zero());
+	});
+}
+
+#[test]
+fn reclaim_works() {
+	new_test_ext().execute_with(|| {
+		let info = DispatchInfo { call_weight: Weight::from_parts(100, 200), ..Default::default() };
+		crate::Pallet::<Test>::reclaim_weight(
+			&info,
+			&PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(50, 100)),
+				..Default::default()
+			},
+		)
+		.unwrap();
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(50, 100));
+
+		crate::Pallet::<Test>::reclaim_weight(
+			&info,
+			&PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(25, 200)),
+				..Default::default()
+			},
+		)
+		.unwrap();
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(75, 100));
+
+		crate::Pallet::<Test>::reclaim_weight(
+			&info,
+			&PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(300, 50)),
+				..Default::default()
+			},
+		)
+		.unwrap();
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(75, 150));
+
+		crate::Pallet::<Test>::reclaim_weight(
+			&info,
+			&PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(300, 300)),
+				..Default::default()
+			},
+		)
+		.unwrap();
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::from_parts(75, 150));
+
+		System::note_applied_extrinsic(&Ok(().into()), Default::default());
+		assert_eq!(crate::ExtrinsicWeightReclaimed::<Test>::get(), Weight::zero());
+	});
+}
diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs
index 1842b163162..dec81859847 100644
--- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs
+++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs
@@ -85,7 +85,6 @@ where
 		match self.format {
 			ExtrinsicFormat::Bare => {
 				let inherent_validation = I::validate_unsigned(source, &self.function)?;
-				#[allow(deprecated)]
 				let legacy_validation = Extension::bare_validate(&self.function, info, len)?;
 				Ok(legacy_validation.combine_with(inherent_validation))
 			},
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
index 27f33acb69c..4d95e5e6f3a 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
@@ -487,7 +487,7 @@ pub trait TransactionExtension<Call: Dispatchable>:
 #[macro_export]
 macro_rules! impl_tx_ext_default {
 	($call:ty ; , $( $rest:tt )*) => {
-		impl_tx_ext_default!{$call ; $( $rest )*}
+		$crate::impl_tx_ext_default!{$call ; $( $rest )*}
 	};
 	($call:ty ; validate $( $rest:tt )*) => {
 		fn validate(
@@ -502,7 +502,7 @@ macro_rules! impl_tx_ext_default {
 		) -> $crate::traits::ValidateResult<Self::Val, $call> {
 			Ok((Default::default(), Default::default(), origin))
 		}
-		impl_tx_ext_default!{$call ; $( $rest )*}
+		$crate::impl_tx_ext_default!{$call ; $( $rest )*}
 	};
 	($call:ty ; prepare $( $rest:tt )*) => {
 		fn prepare(
@@ -515,13 +515,13 @@ macro_rules! impl_tx_ext_default {
 		) -> Result<Self::Pre, $crate::transaction_validity::TransactionValidityError> {
 			Ok(Default::default())
 		}
-		impl_tx_ext_default!{$call ; $( $rest )*}
+		$crate::impl_tx_ext_default!{$call ; $( $rest )*}
 	};
 	($call:ty ; weight $( $rest:tt )*) => {
 		fn weight(&self, _call: &$call) -> $crate::Weight {
 			$crate::Weight::zero()
 		}
-		impl_tx_ext_default!{$call ; $( $rest )*}
+		$crate::impl_tx_ext_default!{$call ; $( $rest )*}
 	};
 	($call:ty ;) => {};
 }
diff --git a/substrate/test-utils/runtime/src/extrinsic.rs b/substrate/test-utils/runtime/src/extrinsic.rs
index 491086bef49..49dc6ba035c 100644
--- a/substrate/test-utils/runtime/src/extrinsic.rs
+++ b/substrate/test-utils/runtime/src/extrinsic.rs
@@ -212,6 +212,7 @@ impl ExtrinsicBuilder {
 				self.metadata_hash
 					.map(CheckMetadataHash::new_with_custom_hash)
 					.unwrap_or_else(|| CheckMetadataHash::new(false)),
+				frame_system::WeightReclaim::new(),
 			);
 			let raw_payload = SignedPayload::from_raw(
 				self.function.clone(),
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index 66677686531..4d24354f99a 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -155,6 +155,7 @@ pub type TxExtension = (
 	(CheckNonce<Runtime>, CheckWeight<Runtime>),
 	CheckSubstrateCall,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 /// The payload being signed in transactions.
 pub type SignedPayload = sp_runtime::generic::SignedPayload<RuntimeCall, TxExtension>;
diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs
index 72eded5bfd1..972c7500f39 100644
--- a/templates/minimal/runtime/src/lib.rs
+++ b/templates/minimal/runtime/src/lib.rs
@@ -118,6 +118,10 @@ type TxExtension = (
 	// Ensures that the sender has enough funds to pay for the transaction
 	// and deducts the fee from the sender's account.
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	// Reclaim the unused weight from the block using post dispatch information.
+	// It must be last in the pipeline in order to catch the refund in previous transaction
+	// extensions.
+	frame_system::WeightReclaim<Runtime>,
 );
 
 // Composes the runtime by adding all the used pallets and deriving necessary types.
diff --git a/templates/parachain/runtime/Cargo.toml b/templates/parachain/runtime/Cargo.toml
index 9a0548106ed..83d7bf4c9b7 100644
--- a/templates/parachain/runtime/Cargo.toml
+++ b/templates/parachain/runtime/Cargo.toml
@@ -48,11 +48,11 @@ polkadot-sdk = { workspace = true, default-features = false, features = [
 
 	"cumulus-pallet-aura-ext",
 	"cumulus-pallet-session-benchmarking",
+	"cumulus-pallet-weight-reclaim",
 	"cumulus-pallet-xcm",
 	"cumulus-pallet-xcmp-queue",
 	"cumulus-primitives-aura",
 	"cumulus-primitives-core",
-	"cumulus-primitives-storage-weight-reclaim",
 	"cumulus-primitives-utility",
 	"pallet-collator-selection",
 	"parachains-common",
diff --git a/templates/parachain/runtime/src/benchmarks.rs b/templates/parachain/runtime/src/benchmarks.rs
index aae50e7258c..ca9d423bf85 100644
--- a/templates/parachain/runtime/src/benchmarks.rs
+++ b/templates/parachain/runtime/src/benchmarks.rs
@@ -33,4 +33,5 @@ polkadot_sdk::frame_benchmarking::define_benchmarks!(
 	[pallet_collator_selection, CollatorSelection]
 	[cumulus_pallet_parachain_system, ParachainSystem]
 	[cumulus_pallet_xcmp_queue, XcmpQueue]
+	[cumulus_pallet_weight_reclaim, WeightReclaim]
 );
diff --git a/templates/parachain/runtime/src/configs/mod.rs b/templates/parachain/runtime/src/configs/mod.rs
index ba4c71c7f21..1e9155f59a5 100644
--- a/templates/parachain/runtime/src/configs/mod.rs
+++ b/templates/parachain/runtime/src/configs/mod.rs
@@ -129,6 +129,11 @@ impl frame_system::Config for Runtime {
 	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
+/// Configure the pallet weight reclaim tx.
+impl cumulus_pallet_weight_reclaim::Config for Runtime {
+	type WeightInfo = ();
+}
+
 impl pallet_timestamp::Config for Runtime {
 	/// A timestamp: milliseconds since the unix epoch.
 	type Moment = u64;
diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs
index 9669237af78..0be27ecce73 100644
--- a/templates/parachain/runtime/src/lib.rs
+++ b/templates/parachain/runtime/src/lib.rs
@@ -75,18 +75,20 @@ pub type BlockId = generic::BlockId<Block>;
 
 /// The extension to the basic transaction logic.
 #[docify::export(template_signed_extra)]
-pub type TxExtension = (
-	frame_system::CheckNonZeroSender<Runtime>,
-	frame_system::CheckSpecVersion<Runtime>,
-	frame_system::CheckTxVersion<Runtime>,
-	frame_system::CheckGenesis<Runtime>,
-	frame_system::CheckEra<Runtime>,
-	frame_system::CheckNonce<Runtime>,
-	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
-	cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim<Runtime>,
-	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
-);
+pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim<
+	Runtime,
+	(
+		frame_system::CheckNonZeroSender<Runtime>,
+		frame_system::CheckSpecVersion<Runtime>,
+		frame_system::CheckTxVersion<Runtime>,
+		frame_system::CheckGenesis<Runtime>,
+		frame_system::CheckEra<Runtime>,
+		frame_system::CheckNonce<Runtime>,
+		frame_system::CheckWeight<Runtime>,
+		pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+		frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	),
+>;
 
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic =
@@ -272,6 +274,8 @@ mod runtime {
 	pub type Timestamp = pallet_timestamp;
 	#[runtime::pallet_index(3)]
 	pub type ParachainInfo = parachain_info;
+	#[runtime::pallet_index(4)]
+	pub type WeightReclaim = cumulus_pallet_weight_reclaim;
 
 	// Monetary stuff.
 	#[runtime::pallet_index(10)]
diff --git a/templates/solochain/node/src/benchmarking.rs b/templates/solochain/node/src/benchmarking.rs
index 0d60230cd19..467cad4c0aa 100644
--- a/templates/solochain/node/src/benchmarking.rs
+++ b/templates/solochain/node/src/benchmarking.rs
@@ -122,6 +122,7 @@ pub fn create_benchmark_extrinsic(
 		frame_system::CheckWeight::<runtime::Runtime>::new(),
 		pallet_transaction_payment::ChargeTransactionPayment::<runtime::Runtime>::from(0),
 		frame_metadata_hash_extension::CheckMetadataHash::<runtime::Runtime>::new(false),
+		frame_system::WeightReclaim::<runtime::Runtime>::new(),
 	);
 
 	let raw_payload = runtime::SignedPayload::from_raw(
@@ -137,6 +138,7 @@ pub fn create_benchmark_extrinsic(
 			(),
 			(),
 			None,
+			(),
 		),
 	);
 	let signature = raw_payload.using_encoded(|e| sender.sign(e));
diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs
index ae0ea16ae42..6a2149ec8b6 100644
--- a/templates/solochain/runtime/src/lib.rs
+++ b/templates/solochain/runtime/src/lib.rs
@@ -157,6 +157,7 @@ pub type TxExtension = (
 	frame_system::CheckWeight<Runtime>,
 	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 	frame_metadata_hash_extension::CheckMetadataHash<Runtime>,
+	frame_system::WeightReclaim<Runtime>,
 );
 
 /// Unchecked extrinsic type as expected by this runtime.
diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml
index d2a47ade7f8..17a7c02e825 100644
--- a/umbrella/Cargo.toml
+++ b/umbrella/Cargo.toml
@@ -29,6 +29,7 @@ std = [
 	"cumulus-pallet-parachain-system?/std",
 	"cumulus-pallet-session-benchmarking?/std",
 	"cumulus-pallet-solo-to-para?/std",
+	"cumulus-pallet-weight-reclaim?/std",
 	"cumulus-pallet-xcm?/std",
 	"cumulus-pallet-xcmp-queue?/std",
 	"cumulus-ping?/std",
@@ -239,6 +240,7 @@ runtime-benchmarks = [
 	"cumulus-pallet-dmp-queue?/runtime-benchmarks",
 	"cumulus-pallet-parachain-system?/runtime-benchmarks",
 	"cumulus-pallet-session-benchmarking?/runtime-benchmarks",
+	"cumulus-pallet-weight-reclaim?/runtime-benchmarks",
 	"cumulus-pallet-xcmp-queue?/runtime-benchmarks",
 	"cumulus-primitives-core?/runtime-benchmarks",
 	"cumulus-primitives-utility?/runtime-benchmarks",
@@ -369,6 +371,7 @@ try-runtime = [
 	"cumulus-pallet-dmp-queue?/try-runtime",
 	"cumulus-pallet-parachain-system?/try-runtime",
 	"cumulus-pallet-solo-to-para?/try-runtime",
+	"cumulus-pallet-weight-reclaim?/try-runtime",
 	"cumulus-pallet-xcm?/try-runtime",
 	"cumulus-pallet-xcmp-queue?/try-runtime",
 	"cumulus-ping?/try-runtime",
@@ -540,7 +543,7 @@ with-tracing = [
 	"sp-tracing?/with-tracing",
 	"sp-tracing?/with-tracing",
 ]
-runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", 
"pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", 
"snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
+runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", 
"pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", 
"slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
 runtime = [
 	"frame-benchmarking",
 	"frame-benchmarking-pallet-pov",
@@ -722,6 +725,11 @@ default-features = false
 optional = true
 path = "../cumulus/pallets/solo-to-para"
 
+[dependencies.cumulus-pallet-weight-reclaim]
+default-features = false
+optional = true
+path = "../cumulus/pallets/weight-reclaim"
+
 [dependencies.cumulus-pallet-xcm]
 default-features = false
 optional = true
diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs
index 7b3c869588f..3504f081f29 100644
--- a/umbrella/src/lib.rs
+++ b/umbrella/src/lib.rs
@@ -141,6 +141,10 @@ pub use cumulus_pallet_session_benchmarking;
 #[cfg(feature = "cumulus-pallet-solo-to-para")]
 pub use cumulus_pallet_solo_to_para;
 
/// Pallet and transaction extensions for accurate proof size reclaim.
+#[cfg(feature = "cumulus-pallet-weight-reclaim")]
+pub use cumulus_pallet_weight_reclaim;
+
 /// Pallet for stuff specific to parachains' usage of XCM.
 #[cfg(feature = "cumulus-pallet-xcm")]
 pub use cumulus_pallet_xcm;
-- 
GitLab


From 8d2130cce6701195d7c1e97947f49dc3963ea996 Mon Sep 17 00:00:00 2001
From: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Date: Mon, 6 Jan 2025 17:19:29 +0900
Subject: [PATCH 019/116] Print taplo version in CI (#7041)

I can't find taplo version in the log, and current version is
incompatible with latest version.
---
 .github/workflows/reusable-preflight.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/reusable-preflight.yml b/.github/workflows/reusable-preflight.yml
index e1799adddca..8487ab107d7 100644
--- a/.github/workflows/reusable-preflight.yml
+++ b/.github/workflows/reusable-preflight.yml
@@ -203,6 +203,7 @@ jobs:
           echo $( substrate-contracts-node --version | awk 'NF' )
           estuary --version
           cargo-contract --version
+          taplo --version
 
       - name: Info forklift
         run: forklift version
-- 
GitLab


From 6eca7647dc99dd0e78aacb740ba931e99e6ba71f Mon Sep 17 00:00:00 2001
From: taozui472 <taozui472@gmail.com>
Date: Mon, 6 Jan 2025 16:44:06 +0800
Subject: [PATCH 020/116] chore: delete repeat words (#7034)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Dónal Murray <donal.murray@parity.io>
---
 polkadot/primitives/src/v8/mod.rs                               | 2 +-
 polkadot/roadmap/implementers-guide/src/architecture.md         | 2 +-
 polkadot/roadmap/implementers-guide/src/protocol-approval.md    | 2 +-
 .../implementers-guide/src/protocol-validator-disabling.md      | 2 +-
 polkadot/roadmap/implementers-guide/src/runtime/session_info.md | 2 +-
 substrate/client/network/README.md                              | 2 +-
 substrate/frame/democracy/README.md                             | 2 +-
 substrate/frame/democracy/src/lib.rs                            | 2 +-
 substrate/frame/recovery/README.md                              | 2 +-
 substrate/frame/recovery/src/lib.rs                             | 2 +-
 10 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/polkadot/primitives/src/v8/mod.rs b/polkadot/primitives/src/v8/mod.rs
index fdcb9fe8fb7..7fc4c5b5c3f 100644
--- a/polkadot/primitives/src/v8/mod.rs
+++ b/polkadot/primitives/src/v8/mod.rs
@@ -1900,7 +1900,7 @@ pub struct SessionInfo {
 	/// participating in parachain consensus. See
 	/// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148).
 	///
-	/// `SessionInfo::validators` will be limited to to `max_validators` when set.
+	/// `SessionInfo::validators` will be limited to `max_validators` when set.
 	pub validators: IndexedVec<ValidatorIndex, ValidatorId>,
 	/// Validators' authority discovery keys for the session in canonical ordering.
 	///
diff --git a/polkadot/roadmap/implementers-guide/src/architecture.md b/polkadot/roadmap/implementers-guide/src/architecture.md
index b7527066200..e2be92e4cdd 100644
--- a/polkadot/roadmap/implementers-guide/src/architecture.md
+++ b/polkadot/roadmap/implementers-guide/src/architecture.md
@@ -93,7 +93,7 @@ Runtime logic is divided up into Modules and APIs. Modules encapsulate particula
 consist of storage, routines, and entry-points. Routines are invoked by entry points, by other modules, upon block
 initialization or closing. Routines can read and alter the storage of the module. Entry-points are the means by which
 new information is introduced to a module and can limit the origins (user, root, parachain) that they accept being
-called by. Each block in the blockchain contains a set of Extrinsics. Each extrinsic targets a a specific entry point to
+called by. Each block in the blockchain contains a set of Extrinsics. Each extrinsic targets a specific entry point to
 trigger and which data should be passed to it. Runtime APIs provide a means for Node-side behavior to extract meaningful
 information from the state of a single fork.
 
diff --git a/polkadot/roadmap/implementers-guide/src/protocol-approval.md b/polkadot/roadmap/implementers-guide/src/protocol-approval.md
index b6aa16646ad..25d4fa5dada 100644
--- a/polkadot/roadmap/implementers-guide/src/protocol-approval.md
+++ b/polkadot/roadmap/implementers-guide/src/protocol-approval.md
@@ -84,7 +84,7 @@ slashing risk for validator operators.
 
 In future, we shall determine which among the several hardening techniques best benefits the network as a whole.  We
 could provide a multi-process multi-machine architecture for validators, perhaps even reminiscent of GNUNet, or perhaps
-more resembling smart HSM tooling.  We might instead design a system that more resembled full systems, like like Cosmos'
+more resembling smart HSM tooling.  We might instead design a system that more resembled full systems, like Cosmos'
 sentry nodes.  In either case, approval assignments might be handled by a slightly hardened machine, but not necessarily
 nearly as hardened as approval votes, but approval votes machines must similarly run foreign WASM code, which increases
 their risk, so assignments being separate sounds helpful.
diff --git a/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md b/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md
index 9fd44c00fa0..c2861b4035e 100644
--- a/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md
+++ b/polkadot/roadmap/implementers-guide/src/protocol-validator-disabling.md
@@ -111,7 +111,7 @@ checking (% for 30-ish malicious in a row).
 There are also censorship or liveness issues if backing is suddenly dominated by malicious nodes but in general even if
 some honest blocks get backed liveness should be preserved.
 
-> **Note:** It is worth noting that is is fundamentally a defense in depth strategy because if we assume disputes are
+> **Note:** It is worth noting that this is fundamentally a defense in depth strategy because if we assume disputes are
 > perfect it should not be a real concern. In reality disputes and determinism are difficult to get right, and
 > non-determinism and happen so defense in depth is crucial when handling those subsystems.
 
diff --git a/polkadot/roadmap/implementers-guide/src/runtime/session_info.md b/polkadot/roadmap/implementers-guide/src/runtime/session_info.md
index fa7f55c4f0b..daf7e5c7fd8 100644
--- a/polkadot/roadmap/implementers-guide/src/runtime/session_info.md
+++ b/polkadot/roadmap/implementers-guide/src/runtime/session_info.md
@@ -16,7 +16,7 @@ struct SessionInfo {
     /// in parachain consensus. See
     /// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148).
     ///
-    /// `SessionInfo::validators` will be limited to to `max_validators` when set.
+    /// `SessionInfo::validators` will be limited to `max_validators` when set.
     validators: Vec<ValidatorId>,
     /// Validators' authority discovery keys for the session in canonical ordering.
     ///
diff --git a/substrate/client/network/README.md b/substrate/client/network/README.md
index f4031fbd308..9903109d847 100644
--- a/substrate/client/network/README.md
+++ b/substrate/client/network/README.md
@@ -261,7 +261,7 @@ data. I.e. it is unable to serve bock bodies and headers other than the most rec
 nodes have block history available, a background sync process is started that downloads all the missing blocks.
 It is run in parallel with the keep-up sync and does not interfere with downloading of the recent blocks.
 During this download we also import GRANDPA justifications for blocks with authority set changes, so that
-the warp-synced node has all the data to serve for other nodes nodes that might want to sync from it with
+the warp-synced node has all the data to serve for other nodes that might want to sync from it with
 any method.
 
 # Usage
diff --git a/substrate/frame/democracy/README.md b/substrate/frame/democracy/README.md
index bbc5f1c6558..d9d21e62447 100644
--- a/substrate/frame/democracy/README.md
+++ b/substrate/frame/democracy/README.md
@@ -96,7 +96,7 @@ This call can only be made by the `CancellationOrigin`.
 
 This call can only be made by the `ExternalOrigin`.
 
-- `external_propose` - Schedules a proposal to become a referendum once it is is legal
+- `external_propose` - Schedules a proposal to become a referendum once it is legal
   for an externally proposed referendum.
 
 #### External Majority Origin
diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs
index 27bc36a756e..2c662fbad26 100644
--- a/substrate/frame/democracy/src/lib.rs
+++ b/substrate/frame/democracy/src/lib.rs
@@ -113,7 +113,7 @@
 //!
 //! This call can only be made by the `ExternalOrigin`.
 //!
-//! - `external_propose` - Schedules a proposal to become a referendum once it is is legal for an
+//! - `external_propose` - Schedules a proposal to become a referendum once it is legal for an
 //!   externally proposed referendum.
 //!
 //! #### External Majority Origin
diff --git a/substrate/frame/recovery/README.md b/substrate/frame/recovery/README.md
index 7e2dd7a2361..fdaef5784fd 100644
--- a/substrate/frame/recovery/README.md
+++ b/substrate/frame/recovery/README.md
@@ -67,7 +67,7 @@ permissionless. However, the recovery deposit is an economic deterrent that
 should disincentivize would-be attackers from trying to maliciously recover
 accounts.
 
-The recovery deposit can always be claimed by the account which is trying to
+The recovery deposit can always be claimed by the account which is trying
 to be recovered. In the case of a malicious recovery attempt, the account
 owner who still has access to their account can claim the deposit and
 essentially punish the malicious user.
diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs
index d8f3c33fbea..4de1919cdc3 100644
--- a/substrate/frame/recovery/src/lib.rs
+++ b/substrate/frame/recovery/src/lib.rs
@@ -80,7 +80,7 @@
 //! should disincentivize would-be attackers from trying to maliciously recover
 //! accounts.
 //!
-//! The recovery deposit can always be claimed by the account which is trying to
+//! The recovery deposit can always be claimed by the account which is trying
 //! to be recovered. In the case of a malicious recovery attempt, the account
 //! owner who still has access to their account can claim the deposit and
 //! essentially punish the malicious user.
-- 
GitLab


From ffa90d0f2b9b4438e2f0fa3d4d532923d7ba978f Mon Sep 17 00:00:00 2001
From: Alin Dima <alin@parity.io>
Date: Mon, 6 Jan 2025 11:57:29 +0200
Subject: [PATCH 021/116] fix chunk fetching network compatibility zombienet
 test (#6988)

Fix this zombienet test

It was failing because in
https://github.com/paritytech/polkadot-sdk/pull/6452 I enabled the v2
receipts for testnet genesis,
so the collators started sending v2 receipts with zeroed collator
signatures to old validators that were still checking those signatures
(which lead to disputes, since new validators considered the candidates
valid).

The fix is to also use an old image for collators, so that we don't
create v2 receipts.

We cannot remove this test yet because collators also perform chunk
recovery, so until all collators are upgraded, we need to maintain this
compatibility with the old protocol version (which is also why
systematic recovery was not yet enabled)
---
 .../0014-chunk-fetching-network-compatibility.toml           | 3 ++-
 prdoc/pr_6988.prdoc                                          | 5 +++++
 2 files changed, 7 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_6988.prdoc

diff --git a/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml
index 881abab64fd..874b8a09bb2 100644
--- a/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml
+++ b/polkadot/zombienet_tests/functional/0014-chunk-fetching-network-compatibility.toml
@@ -42,7 +42,8 @@ chain = "glutton-westend-local-{{id}}"
 
     [parachains.collator]
     name = "collator"
-    image = "{{CUMULUS_IMAGE}}"
+    # Use an old image that does not send out v2 receipts, as the old validators will still check the collator signatures.
+    image = "docker.io/paritypr/polkadot-parachain-debug:master-bde0bbe5"
     args = ["-lparachain=debug"]
 
 {% endfor %}
diff --git a/prdoc/pr_6988.prdoc b/prdoc/pr_6988.prdoc
new file mode 100644
index 00000000000..18f70f9fd97
--- /dev/null
+++ b/prdoc/pr_6988.prdoc
@@ -0,0 +1,5 @@
+doc: []
+
+crates:
+  - name: polkadot
+    bump: none
\ No newline at end of file
-- 
GitLab


From 1dcff3df39b85fa43c7ca1dafe10f802cd812234 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert <skunert49@gmail.com>
Date: Mon, 6 Jan 2025 14:09:06 +0100
Subject: [PATCH 022/116] Avoid incomplete block import pipeline with full
 verifying import queue (#7050)

## Problem
In the parachain template we use the [fully verifying import queue

](https://github.com/paritytech/polkadot-sdk/blob/3d9eddbeb262277c79f2b93b9efb5af95a3a35a8/cumulus/client/consensus/aura/src/equivocation_import_queue.rs#L224-L224)
which does extra equivocation checks.

However, when we import a warp synced block with state, we don't set a
fork choice, leading to an incomplete block import pipeline and error
here:
https://github.com/paritytech/polkadot-sdk/blob/3d9eddbeb262277c79f2b93b9efb5af95a3a35a8/substrate/client/service/src/client/client.rs#L488-L488

This renders warp sync useless for chains using this import queue.

## Fix
The fix is to always import a block with state as best block, as we
already do in the normal Aura Verifier.
In a follow up we should also take another look into unifying the usage
of the different import queues.

fixes https://github.com/paritytech/project-mythical/issues/256

---------

Co-authored-by: command-bot <>
---
 .../consensus/aura/src/equivocation_import_queue.rs   |  1 +
 prdoc/pr_7050.prdoc                                   | 11 +++++++++++
 2 files changed, 12 insertions(+)
 create mode 100644 prdoc/pr_7050.prdoc

diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
index 68f2d37c874..dbd9d5ba6a6 100644
--- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
+++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
@@ -97,6 +97,7 @@ where
 		// This is done for example when gap syncing and it is expected that the block after the gap
 		// was checked/chosen properly, e.g. by warp syncing to this block using a finality proof.
 		if block_params.state_action.skip_execution_checks() || block_params.with_state() {
+			block_params.fork_choice = Some(ForkChoiceStrategy::Custom(block_params.with_state()));
 			return Ok(block_params)
 		}
 
diff --git a/prdoc/pr_7050.prdoc b/prdoc/pr_7050.prdoc
new file mode 100644
index 00000000000..da9dd808033
--- /dev/null
+++ b/prdoc/pr_7050.prdoc
@@ -0,0 +1,11 @@
+title: Avoid incomplete block import pipeline with full verifying import queue
+doc:
+- audience: Node Dev
+  description: |-
+    When warp syncing a node using the equivocation checking verifier, we now properly set the fork_choice rule.
+    Affected are mostly nodes that are derived from the parachain template. Omni-node is not affected.
+
+    This prevents the error `ClientImport("Incomplete block import pipeline.")` after state sync.
+crates:
+- name: cumulus-client-consensus-aura
+  bump: patch
-- 
GitLab


From 568231a9a85d94954c002532a0f4351a3bb59e83 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Date: Mon, 6 Jan 2025 14:52:07 +0100
Subject: [PATCH 023/116] [core-fellowship] Add permissionless import_member
 (#7030)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Changes:
- Add call `import_member` to the core-fellowship pallet.
- Move common logic between `import` and `import_member` into
`do_import`.

## `import_member`

Can be used to induct an arbitrary collective member and is callable by
any signed origin. Pays no fees upon success.
This is useful in the case that members did not induct themselves and
are idling on their rank.

---------

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Co-authored-by: Bastian Köcher <git@kchr.de>
Co-authored-by: command-bot <>
---
 .../pallet_core_fellowship_ambassador_core.rs | 160 +++++++++------
 .../pallet_core_fellowship_fellowship_core.rs | 183 +++++++++++-------
 prdoc/pr_7030.prdoc                           |  24 +++
 .../frame/core-fellowship/src/benchmarking.rs |  18 ++
 substrate/frame/core-fellowship/src/lib.rs    |  67 +++++--
 .../core-fellowship/src/tests/integration.rs  |  38 +++-
 .../frame/core-fellowship/src/tests/unit.rs   |  62 ++++++
 .../frame/core-fellowship/src/weights.rs      |  71 ++++---
 8 files changed, 448 insertions(+), 175 deletions(-)
 create mode 100644 prdoc/pr_7030.prdoc

diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs
index 6bedfcc7e01..4d092ec8031 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs
@@ -1,4 +1,4 @@
-// Copyright Parity Technologies (UK) Ltd.
+// Copyright (C) Parity Technologies (UK) Ltd.
 // This file is part of Cumulus.
 
 // Cumulus is free software: you can redistribute it and/or modify
@@ -16,25 +16,29 @@
 
 //! Autogenerated weights for `pallet_core_fellowship`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+//! HOSTNAME: `623e9e4b814e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --chain=collectives-polkadot-dev
-// --wasm-execution=compiled
-// --pallet=pallet_core_fellowship
 // --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+// --runtime=target/production/wbuild/collectives-westend-runtime/collectives_westend_runtime.wasm
+// --pallet=pallet_core_fellowship
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -48,25 +52,26 @@ use core::marker::PhantomData;
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<T> {
 	/// Storage: `AmbassadorCore::Params` (r:0 w:1)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	fn set_params() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 11_000_000 picoseconds.
-		Weight::from_parts(11_000_000, 0)
+		// Minimum execution time: 9_131_000 picoseconds.
+		Weight::from_parts(9_371_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: `AmbassadorCore::Params` (r:0 w:1)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Params` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	fn set_partial_params() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 11_000_000 picoseconds.
-		Weight::from_parts(11_000_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `471`
+		//  Estimated: `1853`
+		// Minimum execution time: 18_375_000 picoseconds.
+		Weight::from_parts(18_872_000, 0)
+			.saturating_add(Weight::from_parts(0, 1853))
+			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
@@ -74,44 +79,48 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
-	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
 	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1)
+	/// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn bump_offboard() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66011`
+		//  Measured:  `66402`
 		//  Estimated: `69046`
-		// Minimum execution time: 96_000_000 picoseconds.
-		Weight::from_parts(111_000_000, 0)
+		// Minimum execution time: 156_752_000 picoseconds.
+		Weight::from_parts(164_242_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
 	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
-	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
 	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1)
+	/// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn bump_demote() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66121`
+		//  Measured:  `66512`
 		//  Estimated: `69046`
-		// Minimum execution time: 99_000_000 picoseconds.
-		Weight::from_parts(116_000_000, 0)
+		// Minimum execution time: 158_877_000 picoseconds.
+		Weight::from_parts(165_228_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `AmbassadorCollective::Members` (r:1 w:0)
 	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -121,8 +130,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `360`
 		//  Estimated: `3514`
-		// Minimum execution time: 21_000_000 picoseconds.
-		Weight::from_parts(22_000_000, 0)
+		// Minimum execution time: 25_056_000 picoseconds.
+		Weight::from_parts(26_028_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -141,8 +150,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `118`
 		//  Estimated: `3514`
-		// Minimum execution time: 36_000_000 picoseconds.
-		Weight::from_parts(36_000_000, 0)
+		// Minimum execution time: 34_784_000 picoseconds.
+		Weight::from_parts(35_970_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(5))
@@ -152,7 +161,7 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
 	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::Params` (r:1 w:0)
-	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1)
 	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
 	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
@@ -163,25 +172,40 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn promote() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `65989`
+		//  Measured:  `66055`
 		//  Estimated: `69046`
-		// Minimum execution time: 95_000_000 picoseconds.
-		Weight::from_parts(110_000_000, 0)
+		// Minimum execution time: 147_616_000 picoseconds.
+		Weight::from_parts(154_534_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(6))
 	}
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:1)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::MemberCount` (r:9 w:9)
+	/// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1)
+	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IndexToId` (r:0 w:9)
+	/// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:9)
+	/// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// The range of component `r` is `[1, 9]`.
+	/// The range of component `r` is `[1, 9]`.
 	fn promote_fast(r: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `16844`
-		//  Estimated: `19894 + r * (2489 ±0)`
-		// Minimum execution time: 45_065_000 picoseconds.
-		Weight::from_parts(34_090_392, 19894)
-			// Standard Error: 18_620
-			.saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into()))
-			.saturating_add(T::DbWeight::get().reads(3_u64))
+		//  Measured:  `65968`
+		//  Estimated: `69046 + r * (2489 ±0)`
+		// Minimum execution time: 138_323_000 picoseconds.
+		Weight::from_parts(125_497_264, 0)
+			.saturating_add(Weight::from_parts(0, 69046))
+			// Standard Error: 56_050
+			.saturating_add(Weight::from_parts(19_863_853, 0).saturating_mul(r.into()))
+			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
-			.saturating_add(T::DbWeight::get().writes(3_u64))
+			.saturating_add(T::DbWeight::get().writes(3))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into())))
 			.saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into()))
 	}
@@ -193,10 +217,10 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
 	fn offboard() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `331`
+		//  Measured:  `265`
 		//  Estimated: `3514`
-		// Minimum execution time: 21_000_000 picoseconds.
-		Weight::from_parts(22_000_000, 0)
+		// Minimum execution time: 26_903_000 picoseconds.
+		Weight::from_parts(27_645_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -209,8 +233,22 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `285`
 		//  Estimated: `3514`
-		// Minimum execution time: 20_000_000 picoseconds.
-		Weight::from_parts(21_000_000, 0)
+		// Minimum execution time: 23_286_000 picoseconds.
+		Weight::from_parts(23_848_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AmbassadorCore::Member` (r:1 w:1)
+	/// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `AmbassadorCollective::Members` (r:1 w:0)
+	/// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	fn import_member() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `285`
+		//  Estimated: `3514`
+		// Minimum execution time: 23_239_000 picoseconds.
+		Weight::from_parts(23_684_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -225,8 +263,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `65967`
 		//  Estimated: `69046`
-		// Minimum execution time: 78_000_000 picoseconds.
-		Weight::from_parts(104_000_000, 0)
+		// Minimum execution time: 125_987_000 picoseconds.
+		Weight::from_parts(130_625_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -239,8 +277,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `151`
 		//  Estimated: `69046`
-		// Minimum execution time: 43_000_000 picoseconds.
-		Weight::from_parts(44_000_000, 0)
+		// Minimum execution time: 104_431_000 picoseconds.
+		Weight::from_parts(106_646_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs
index 05014e273f0..acb1f82985d 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs
@@ -1,39 +1,44 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_core_fellowship`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `cob`, CPU: `<UNKNOWN>`
-//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+//! HOSTNAME: `623e9e4b814e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
-// target/release/polkadot-parachain
+// frame-omni-bencher
+// v1
 // benchmark
 // pallet
-// --chain=collectives-polkadot-dev
-// --wasm-execution=compiled
-// --pallet=pallet_core_fellowship
 // --extrinsic=*
-// --steps=2
-// --repeat=2
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+// --runtime=target/production/wbuild/collectives-westend-runtime/collectives_westend_runtime.wasm
+// --pallet=pallet_core_fellowship
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,25 +52,26 @@ use core::marker::PhantomData;
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<T> {
 	/// Storage: `FellowshipCore::Params` (r:0 w:1)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	fn set_params() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 11_000_000 picoseconds.
-		Weight::from_parts(12_000_000, 0)
+		// Minimum execution time: 9_115_000 picoseconds.
+		Weight::from_parts(9_523_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: `FellowshipCore::Params` (r:0 w:1)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCore::Params` (r:1 w:1)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	fn set_partial_params() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 11_000_000 picoseconds.
-		Weight::from_parts(12_000_000, 0)
-			.saturating_add(Weight::from_parts(0, 0))
+		//  Measured:  `504`
+		//  Estimated: `1853`
+		// Minimum execution time: 18_294_000 picoseconds.
+		Weight::from_parts(18_942_000, 0)
+			.saturating_add(Weight::from_parts(0, 1853))
+			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `FellowshipCore::Member` (r:1 w:1)
@@ -73,44 +79,48 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Storage: `FellowshipCollective::Members` (r:1 w:1)
 	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::Params` (r:1 w:0)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCollective::MemberCount` (r:1 w:1)
 	/// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
-	/// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0)
+	/// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1)
 	/// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1)
 	/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::IndexToId` (r:0 w:1)
+	/// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn bump_offboard() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66144`
+		//  Measured:  `66535`
 		//  Estimated: `69046`
-		// Minimum execution time: 109_000_000 picoseconds.
-		Weight::from_parts(125_000_000, 0)
+		// Minimum execution time: 152_823_000 picoseconds.
+		Weight::from_parts(158_737_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `FellowshipCore::Member` (r:1 w:1)
 	/// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCollective::Members` (r:1 w:1)
 	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::Params` (r:1 w:0)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCollective::MemberCount` (r:1 w:1)
 	/// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
-	/// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0)
+	/// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1)
 	/// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1)
 	/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::IndexToId` (r:0 w:1)
+	/// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn bump_demote() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66254`
+		//  Measured:  `66645`
 		//  Estimated: `69046`
-		// Minimum execution time: 112_000_000 picoseconds.
-		Weight::from_parts(114_000_000, 0)
+		// Minimum execution time: 157_605_000 picoseconds.
+		Weight::from_parts(162_341_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `FellowshipCollective::Members` (r:1 w:0)
 	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -120,8 +130,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `493`
 		//  Estimated: `3514`
-		// Minimum execution time: 22_000_000 picoseconds.
-		Weight::from_parts(27_000_000, 0)
+		// Minimum execution time: 25_194_000 picoseconds.
+		Weight::from_parts(26_262_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -140,8 +150,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `251`
 		//  Estimated: `3514`
-		// Minimum execution time: 35_000_000 picoseconds.
-		Weight::from_parts(36_000_000, 0)
+		// Minimum execution time: 35_479_000 picoseconds.
+		Weight::from_parts(36_360_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(5))
@@ -151,7 +161,7 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Storage: `FellowshipCore::Member` (r:1 w:1)
 	/// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::Params` (r:1 w:0)
-	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`)
+	/// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCollective::MemberCount` (r:1 w:1)
 	/// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
 	/// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1)
@@ -162,25 +172,40 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
 	fn promote() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66122`
+		//  Measured:  `66188`
 		//  Estimated: `69046`
-		// Minimum execution time: 97_000_000 picoseconds.
-		Weight::from_parts(129_000_000, 0)
+		// Minimum execution time: 147_993_000 picoseconds.
+		Weight::from_parts(153_943_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(5))
 			.saturating_add(T::DbWeight::get().writes(6))
 	}
+	/// Storage: `FellowshipCollective::Members` (r:1 w:1)
+	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCore::Member` (r:1 w:1)
+	/// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::MemberCount` (r:9 w:9)
+	/// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1)
+	/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::IndexToId` (r:0 w:9)
+	/// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::IdToIndex` (r:0 w:9)
+	/// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`)
+	/// The range of component `r` is `[1, 9]`.
+	/// The range of component `r` is `[1, 9]`.
 	fn promote_fast(r: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `16844`
-		//  Estimated: `19894 + r * (2489 ±0)`
-		// Minimum execution time: 45_065_000 picoseconds.
-		Weight::from_parts(34_090_392, 19894)
-			// Standard Error: 18_620
-			.saturating_add(Weight::from_parts(13_578_046, 0).saturating_mul(r.into()))
-			.saturating_add(T::DbWeight::get().reads(3_u64))
+		//  Measured:  `66101`
+		//  Estimated: `69046 + r * (2489 ±0)`
+		// Minimum execution time: 138_444_000 picoseconds.
+		Weight::from_parts(125_440_035, 0)
+			.saturating_add(Weight::from_parts(0, 69046))
+			// Standard Error: 55_452
+			.saturating_add(Weight::from_parts(19_946_954, 0).saturating_mul(r.into()))
+			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
-			.saturating_add(T::DbWeight::get().writes(3_u64))
+			.saturating_add(T::DbWeight::get().writes(3))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into())))
 			.saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into()))
 	}
@@ -192,10 +217,10 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 	/// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`)
 	fn offboard() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `464`
+		//  Measured:  `398`
 		//  Estimated: `3514`
-		// Minimum execution time: 22_000_000 picoseconds.
-		Weight::from_parts(22_000_000, 0)
+		// Minimum execution time: 27_392_000 picoseconds.
+		Weight::from_parts(28_134_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -208,8 +233,22 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `418`
 		//  Estimated: `3514`
-		// Minimum execution time: 20_000_000 picoseconds.
-		Weight::from_parts(24_000_000, 0)
+		// Minimum execution time: 23_523_000 picoseconds.
+		Weight::from_parts(24_046_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `FellowshipCore::Member` (r:1 w:1)
+	/// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
+	/// Storage: `FellowshipCollective::Members` (r:1 w:0)
+	/// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
+	fn import_member() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `418`
+		//  Estimated: `3514`
+		// Minimum execution time: 23_369_000 picoseconds.
+		Weight::from_parts(24_088_000, 0)
 			.saturating_add(Weight::from_parts(0, 3514))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -224,8 +263,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `66100`
 		//  Estimated: `69046`
-		// Minimum execution time: 89_000_000 picoseconds.
-		Weight::from_parts(119_000_000, 0)
+		// Minimum execution time: 127_137_000 picoseconds.
+		Weight::from_parts(131_638_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -238,8 +277,8 @@ impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<
 		// Proof Size summary in bytes:
 		//  Measured:  `184`
 		//  Estimated: `69046`
-		// Minimum execution time: 43_000_000 picoseconds.
-		Weight::from_parts(52_000_000, 0)
+		// Minimum execution time: 103_212_000 picoseconds.
+		Weight::from_parts(105_488_000, 0)
 			.saturating_add(Weight::from_parts(0, 69046))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
diff --git a/prdoc/pr_7030.prdoc b/prdoc/pr_7030.prdoc
new file mode 100644
index 00000000000..3b1f7be558d
--- /dev/null
+++ b/prdoc/pr_7030.prdoc
@@ -0,0 +1,24 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "[core-fellowship] Add permissionless import_member"
+
+doc:
+  - audience: [Runtime Dev, Runtime User]
+    description: |
+      Changes:
+      - Add call `import_member` to the core-fellowship pallet.
+      - Move common logic between `import` and `import_member` into `do_import`.
+
+      This is a minor change so as not to impact UI and downstream integration.
+
+      ## `import_member`
+
+      Can be used to induct an arbitrary collective member and is callable by any signed origin. Pays no fees upon success.  
+      This is useful in the case that members did not induct themselves and are idling on their rank.
+
+crates:
+- name: pallet-core-fellowship
+  bump: major
+- name: collectives-westend-runtime
+  bump: patch
diff --git a/substrate/frame/core-fellowship/src/benchmarking.rs b/substrate/frame/core-fellowship/src/benchmarking.rs
index adb8a4a091b..ac0d489953c 100644
--- a/substrate/frame/core-fellowship/src/benchmarking.rs
+++ b/substrate/frame/core-fellowship/src/benchmarking.rs
@@ -50,6 +50,7 @@ mod benchmarks {
 		for _ in 0..rank {
 			T::Members::promote(&member)?;
 		}
+		#[allow(deprecated)]
 		CoreFellowship::<T, I>::import(RawOrigin::Signed(member.clone()).into())?;
 		Ok(member)
 	}
@@ -260,6 +261,23 @@ mod benchmarks {
 		Ok(())
 	}
 
+	#[benchmark]
+	fn import_member() -> Result<(), BenchmarkError> {
+		let member = account("member", 0, SEED);
+		let sender = account("sender", 0, SEED);
+
+		T::Members::induct(&member)?;
+		T::Members::promote(&member)?;
+
+		assert!(!Member::<T, I>::contains_key(&member));
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(sender), member.clone());
+
+		assert!(Member::<T, I>::contains_key(&member));
+		Ok(())
+	}
+
 	#[benchmark]
 	fn approve() -> Result<(), BenchmarkError> {
 		let member = make_member::<T, I>(1)?;
diff --git a/substrate/frame/core-fellowship/src/lib.rs b/substrate/frame/core-fellowship/src/lib.rs
index c61447e3628..22ba63b2616 100644
--- a/substrate/frame/core-fellowship/src/lib.rs
+++ b/substrate/frame/core-fellowship/src/lib.rs
@@ -21,6 +21,7 @@
 //! This only handles members of non-zero rank.
 //!
 //! # Process Flow
+//!
 //! - Begin with a call to `induct`, where some privileged origin (perhaps a pre-existing member of
 //!   `rank > 1`) is able to make a candidate from an account and introduce it to be tracked in this
 //!   pallet in order to allow evidence to be submitted and promotion voted on.
@@ -36,8 +37,9 @@
 //!   `bump` to demote the candidate by one rank.
 //! - If a candidate fails to be promoted to a member within the `offboard_timeout` period, then
 //!   anyone may call `bump` to remove the account's candidacy.
-//! - Pre-existing members may call `import` to have their rank recognised and be inducted into this
-//!   pallet (to gain a salary and allow for eventual promotion).
+//! - Pre-existing members may call `import_member` on themselves (formerly `import`) to have their
+//!   rank recognised and be inducted into this pallet (to gain a salary and allow for eventual
+//!   promotion).
 //! - If, externally to this pallet, a member or candidate has their rank removed completely, then
 //!   `offboard` may be called to remove them entirely from this pallet.
 //!
@@ -585,28 +587,44 @@ pub mod pallet {
 			Ok(if replaced { Pays::Yes } else { Pays::No }.into())
 		}
 
-		/// Introduce an already-ranked individual of the collective into this pallet. The rank may
-		/// still be zero.
+		/// Introduce an already-ranked individual of the collective into this pallet.
 		///
-		/// This resets `last_proof` to the current block and `last_promotion` will be set to zero,
-		/// thereby delaying any automatic demotion but allowing immediate promotion.
+		/// The rank may still be zero. This resets `last_proof` to the current block and
+		/// `last_promotion` will be set to zero, thereby delaying any automatic demotion but
+		/// allowing immediate promotion.
 		///
 		/// - `origin`: A signed origin of a ranked, but not tracked, account.
 		#[pallet::weight(T::WeightInfo::import())]
 		#[pallet::call_index(8)]
+		#[deprecated = "Use `import_member` instead"]
+		#[allow(deprecated)] // Otherwise FRAME will complain about using something deprecated.
 		pub fn import(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
 			let who = ensure_signed(origin)?;
-			ensure!(!Member::<T, I>::contains_key(&who), Error::<T, I>::AlreadyInducted);
-			let rank = T::Members::rank_of(&who).ok_or(Error::<T, I>::Unranked)?;
+			Self::do_import(who)?;
 
-			let now = frame_system::Pallet::<T>::block_number();
-			Member::<T, I>::insert(
-				&who,
-				MemberStatus { is_active: true, last_promotion: 0u32.into(), last_proof: now },
-			);
-			Self::deposit_event(Event::<T, I>::Imported { who, rank });
+			Ok(Pays::No.into()) // Successful imports are free
+		}
 
-			Ok(Pays::No.into())
+		/// Introduce an already-ranked individual of the collective into this pallet.
+		///
+		/// The rank may still be zero. Can be called by anyone on any collective member - including
+		/// the sender.
+		///
+		/// This resets `last_proof` to the current block and `last_promotion` will be set to zero,
+		/// thereby delaying any automatic demotion but allowing immediate promotion.
+		///
+		/// - `origin`: A signed origin of a ranked, but not tracked, account.
+		/// - `who`: The account ID of the collective member to be inducted.
+		#[pallet::weight(T::WeightInfo::set_partial_params())]
+		#[pallet::call_index(11)]
+		pub fn import_member(
+			origin: OriginFor<T>,
+			who: T::AccountId,
+		) -> DispatchResultWithPostInfo {
+			ensure_signed(origin)?;
+			Self::do_import(who)?;
+
+			Ok(Pays::No.into()) // Successful imports are free
 		}
 
 		/// Set the parameters partially.
@@ -661,6 +679,24 @@ pub mod pallet {
 				}
 			}
 		}
+
+		/// Import `who` into the core-fellowship pallet.
+		///
+		/// `who` must be a member of the collective but *not* already imported.
+		pub(crate) fn do_import(who: T::AccountId) -> DispatchResult {
+			ensure!(!Member::<T, I>::contains_key(&who), Error::<T, I>::AlreadyInducted);
+			let rank = T::Members::rank_of(&who).ok_or(Error::<T, I>::Unranked)?;
+
+			let now = frame_system::Pallet::<T>::block_number();
+			Member::<T, I>::insert(
+				&who,
+				MemberStatus { is_active: true, last_promotion: 0u32.into(), last_proof: now },
+			);
+			Self::deposit_event(Event::<T, I>::Imported { who, rank });
+
+			Ok(())
+		}
+
 		/// Convert a rank into a `0..RANK_COUNT` index suitable for the arrays in Params.
 		///
 		/// Rank 1 becomes index 0, rank `RANK_COUNT` becomes index `RANK_COUNT - 1`. Any rank not
@@ -766,6 +802,7 @@ impl<T: Config<I>, I: 'static>
 	pallet_ranked_collective::BenchmarkSetup<<T as frame_system::Config>::AccountId> for Pallet<T, I>
 {
 	fn ensure_member(who: &<T as frame_system::Config>::AccountId) {
+		#[allow(deprecated)]
 		Self::import(frame_system::RawOrigin::Signed(who.clone()).into()).unwrap();
 	}
 }
diff --git a/substrate/frame/core-fellowship/src/tests/integration.rs b/substrate/frame/core-fellowship/src/tests/integration.rs
index 7a48ed9783e..b2149336547 100644
--- a/substrate/frame/core-fellowship/src/tests/integration.rs
+++ b/substrate/frame/core-fellowship/src/tests/integration.rs
@@ -17,8 +17,10 @@
 
 //! Integration test together with the ranked-collective pallet.
 
+#![allow(deprecated)]
+
 use frame_support::{
-	assert_noop, assert_ok, derive_impl, hypothetically, ord_parameter_types,
+	assert_noop, assert_ok, derive_impl, hypothetically, hypothetically_ok, ord_parameter_types,
 	pallet_prelude::Weight,
 	parameter_types,
 	traits::{ConstU16, EitherOf, IsInVec, MapSuccess, NoOpPoll, TryMapSuccess},
@@ -170,6 +172,37 @@ fn evidence(e: u32) -> Evidence<Test, ()> {
 		.expect("Static length matches")
 }
 
+#[test]
+fn import_simple_works() {
+	new_test_ext().execute_with(|| {
+		for i in 0u16..9 {
+			let acc = i as u64;
+
+			// Does not work yet
+			assert_noop!(CoreFellowship::import(signed(acc)), Error::<Test>::Unranked);
+			assert_noop!(
+				CoreFellowship::import_member(signed(acc + 1), acc),
+				Error::<Test>::Unranked
+			);
+
+			assert_ok!(Club::add_member(RuntimeOrigin::root(), acc));
+			promote_n_times(acc, i);
+
+			hypothetically_ok!(CoreFellowship::import(signed(acc)));
+			hypothetically_ok!(CoreFellowship::import_member(signed(acc), acc));
+			// Works from other accounts
+			assert_ok!(CoreFellowship::import_member(signed(acc + 1), acc));
+
+			// Does not work again
+			assert_noop!(CoreFellowship::import(signed(acc)), Error::<Test>::AlreadyInducted);
+			assert_noop!(
+				CoreFellowship::import_member(signed(acc + 1), acc),
+				Error::<Test>::AlreadyInducted
+			);
+		}
+	});
+}
+
 #[test]
 fn swap_simple_works() {
 	new_test_ext().execute_with(|| {
@@ -178,7 +211,8 @@ fn swap_simple_works() {
 
 			assert_ok!(Club::add_member(RuntimeOrigin::root(), acc));
 			promote_n_times(acc, i);
-			assert_ok!(CoreFellowship::import(signed(acc)));
+			hypothetically_ok!(CoreFellowship::import(signed(acc)));
+			assert_ok!(CoreFellowship::import_member(signed(acc), acc));
 
 			// Swapping normally works:
 			assert_ok!(Club::exchange_member(RuntimeOrigin::root(), acc, acc + 10));
diff --git a/substrate/frame/core-fellowship/src/tests/unit.rs b/substrate/frame/core-fellowship/src/tests/unit.rs
index 11d1ea9fe5b..f4418ed439d 100644
--- a/substrate/frame/core-fellowship/src/tests/unit.rs
+++ b/substrate/frame/core-fellowship/src/tests/unit.rs
@@ -17,6 +17,8 @@
 
 //! The crate's tests.
 
+#![allow(deprecated)]
+
 use std::collections::BTreeMap;
 
 use core::cell::RefCell;
@@ -222,6 +224,66 @@ fn set_partial_params_works() {
 	});
 }
 
+#[test]
+fn import_member_works() {
+	new_test_ext().execute_with(|| {
+		assert_noop!(CoreFellowship::import_member(signed(0), 0), Error::<Test>::Unranked);
+		assert_noop!(CoreFellowship::import(signed(0)), Error::<Test>::Unranked);
+
+		// Make induction work:
+		set_rank(0, 1);
+		assert!(!Member::<Test>::contains_key(0), "not yet imported");
+
+		// `import_member` can be used to induct ourselves:
+		hypothetically!({
+			assert_ok!(CoreFellowship::import_member(signed(0), 0));
+			assert!(Member::<Test>::contains_key(0), "got imported");
+
+			// Twice does not work:
+			assert_noop!(
+				CoreFellowship::import_member(signed(0), 0),
+				Error::<Test>::AlreadyInducted
+			);
+			assert_noop!(CoreFellowship::import(signed(0)), Error::<Test>::AlreadyInducted);
+		});
+
+		// But we could have also used `import`:
+		hypothetically!({
+			assert_ok!(CoreFellowship::import(signed(0)));
+			assert!(Member::<Test>::contains_key(0), "got imported");
+
+			// Twice does not work:
+			assert_noop!(
+				CoreFellowship::import_member(signed(0), 0),
+				Error::<Test>::AlreadyInducted
+			);
+			assert_noop!(CoreFellowship::import(signed(0)), Error::<Test>::AlreadyInducted);
+		});
+	});
+}
+
+#[test]
+fn import_member_same_as_import() {
+	new_test_ext().execute_with(|| {
+		for rank in 0..=9 {
+			set_rank(0, rank);
+
+			let import_root = hypothetically!({
+				assert_ok!(CoreFellowship::import(signed(0)));
+				sp_io::storage::root(sp_runtime::StateVersion::V1)
+			});
+
+			let import_member_root = hypothetically!({
+				assert_ok!(CoreFellowship::import_member(signed(1), 0));
+				sp_io::storage::root(sp_runtime::StateVersion::V1)
+			});
+
+			// `import` and `import_member` do exactly the same thing.
+			assert_eq!(import_root, import_member_root);
+		}
+	});
+}
+
 #[test]
 fn induct_works() {
 	new_test_ext().execute_with(|| {
diff --git a/substrate/frame/core-fellowship/src/weights.rs b/substrate/frame/core-fellowship/src/weights.rs
index 9bca8cb5609..e6381c854d3 100644
--- a/substrate/frame/core-fellowship/src/weights.rs
+++ b/substrate/frame/core-fellowship/src/weights.rs
@@ -61,6 +61,7 @@ pub trait WeightInfo {
 	fn promote_fast(r: u32, ) -> Weight;
 	fn offboard() -> Weight;
 	fn import() -> Weight;
+	fn import_member() -> Weight;
 	fn approve() -> Weight;
 	fn submit_evidence() -> Weight;
 }
@@ -76,7 +77,7 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `0`
 		// Minimum execution time: 6_652_000 picoseconds.
 		Weight::from_parts(7_082_000, 0)
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `CoreFellowship::Params` (r:1 w:1)
 	/// Proof: `CoreFellowship::Params` (`max_values`: Some(1), `max_size`: Some(368), added: 863, mode: `MaxEncodedLen`)
@@ -86,8 +87,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `1853`
 		// Minimum execution time: 12_485_000 picoseconds.
 		Weight::from_parts(12_784_000, 1853)
-			.saturating_add(T::DbWeight::get().reads(1_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -109,8 +110,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 61_243_000 picoseconds.
 		Weight::from_parts(63_033_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(6_u64))
-			.saturating_add(T::DbWeight::get().writes(6_u64))
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -132,8 +133,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 65_063_000 picoseconds.
 		Weight::from_parts(67_047_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(6_u64))
-			.saturating_add(T::DbWeight::get().writes(6_u64))
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:0)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -145,8 +146,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `3514`
 		// Minimum execution time: 21_924_000 picoseconds.
 		Weight::from_parts(22_691_000, 3514)
-			.saturating_add(T::DbWeight::get().reads(2_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -164,8 +165,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `3514`
 		// Minimum execution time: 24_720_000 picoseconds.
 		Weight::from_parts(25_580_000, 3514)
-			.saturating_add(T::DbWeight::get().reads(3_u64))
-			.saturating_add(T::DbWeight::get().writes(5_u64))
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().writes(5_u64))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:1)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -187,8 +188,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 58_481_000 picoseconds.
 		Weight::from_parts(59_510_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(5_u64))
-			.saturating_add(T::DbWeight::get().writes(6_u64))
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:1)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -211,10 +212,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		Weight::from_parts(42_220_685, 19894)
 			// Standard Error: 18_061
 			.saturating_add(Weight::from_parts(13_858_309, 0).saturating_mul(r.into()))
-			.saturating_add(T::DbWeight::get().reads(3_u64))
-			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into())))
-			.saturating_add(T::DbWeight::get().writes(3_u64))
-			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into())))
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(r.into())))
+			.saturating_add(RocksDbWeight::get().writes(3_u64))
+			.saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(r.into())))
 			.saturating_add(Weight::from_parts(0, 2489).saturating_mul(r.into()))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:0)
@@ -229,8 +230,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `3514`
 		// Minimum execution time: 17_492_000 picoseconds.
 		Weight::from_parts(18_324_000, 3514)
-			.saturating_add(T::DbWeight::get().reads(2_u64))
-			.saturating_add(T::DbWeight::get().writes(2_u64))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -242,8 +243,18 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `3514`
 		// Minimum execution time: 16_534_000 picoseconds.
 		Weight::from_parts(17_046_000, 3514)
-			.saturating_add(T::DbWeight::get().reads(2_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	fn import_member() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `285`
+		//  Estimated: `3514`
+		// Minimum execution time: 23_239_000 picoseconds.
+		Weight::from_parts(23_684_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(RocksDbWeight::get().reads(2))
+			.saturating_add(RocksDbWeight::get().writes(1))
 	}
 	/// Storage: `RankedCollective::Members` (r:1 w:0)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
@@ -257,8 +268,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 42_264_000 picoseconds.
 		Weight::from_parts(43_281_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(3_u64))
-			.saturating_add(T::DbWeight::get().writes(2_u64))
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
 	/// Storage: `CoreFellowship::Member` (r:1 w:0)
 	/// Proof: `CoreFellowship::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
@@ -270,8 +281,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		//  Estimated: `19894`
 		// Minimum execution time: 25_461_000 picoseconds.
 		Weight::from_parts(26_014_000, 19894)
-			.saturating_add(T::DbWeight::get().reads(2_u64))
-			.saturating_add(T::DbWeight::get().writes(1_u64))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 }
 
@@ -454,6 +465,16 @@ impl WeightInfo for () {
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
+	fn import_member() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `285`
+		//  Estimated: `3514`
+		// Minimum execution time: 23_239_000 picoseconds.
+		Weight::from_parts(23_684_000, 0)
+			.saturating_add(Weight::from_parts(0, 3514))
+			.saturating_add(RocksDbWeight::get().reads(2))
+			.saturating_add(RocksDbWeight::get().writes(1))
+	}
 	/// Storage: `RankedCollective::Members` (r:1 w:0)
 	/// Proof: `RankedCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`)
 	/// Storage: `CoreFellowship::Member` (r:1 w:1)
-- 
GitLab


From 6b6c70b0165b2c38e239eb740a7561e9ed4570de Mon Sep 17 00:00:00 2001
From: jasmy <3776356370@qq.com>
Date: Tue, 7 Jan 2025 03:16:08 +0800
Subject: [PATCH 024/116] Fix typos (#7027)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Dónal Murray <donal.murray@parity.io>
---
 bridges/SECURITY.md                 | 2 +-
 bridges/modules/messages/README.md  | 2 +-
 cumulus/docs/overview.md            | 2 +-
 substrate/client/network/README.md  | 2 +-
 substrate/frame/bounties/README.md  | 2 +-
 substrate/frame/bounties/src/lib.rs | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/bridges/SECURITY.md b/bridges/SECURITY.md
index 9f215c88765..ea19eca42cc 100644
--- a/bridges/SECURITY.md
+++ b/bridges/SECURITY.md
@@ -13,6 +13,6 @@ If you think that your report might be eligible for the Bug Bounty Program, plea
 Please check up-to-date [Parity Bug Bounty Program rules](https://www.parity.io/bug-bounty) to find out the information
 about our Bug Bounty Program.
 
-**Warning**: This is an unified SECURITY.md file for Paritytech GitHub Organization. The presence of this file does not
+**Warning**: This is a unified SECURITY.md file for Paritytech GitHub Organization. The presence of this file does not
 mean that this repository is covered by the Bug Bounty program. Please always check the Bug Bounty Program scope for
 information.
diff --git a/bridges/modules/messages/README.md b/bridges/modules/messages/README.md
index a78c8680249..7d9a23b4ba1 100644
--- a/bridges/modules/messages/README.md
+++ b/bridges/modules/messages/README.md
@@ -13,7 +13,7 @@ module and the final goal is to hand message to the message dispatch mechanism.
 
 ## Overview
 
-Message lane is an unidirectional channel, where messages are sent from source chain to the target chain. At the same
+Message lane is a unidirectional channel, where messages are sent from source chain to the target chain. At the same
 time, a single instance of messages module supports both outbound lanes and inbound lanes. So the chain where the module
 is deployed (this chain), may act as a source chain for outbound messages (heading to a bridged chain) and as a target
 chain for inbound messages (coming from a bridged chain).
diff --git a/cumulus/docs/overview.md b/cumulus/docs/overview.md
index 402c56042c4..82603257a87 100644
--- a/cumulus/docs/overview.md
+++ b/cumulus/docs/overview.md
@@ -70,7 +70,7 @@ A Parachain validator needs to validate a given PoVBlock, but without requiring
 the Parachain. To still make it possible to validate the Parachain block, the PoVBlock contains the
 witness data. The witness data is a proof that is collected while building the block. The proof will
 contain all trie nodes that are read during the block production. Cumulus uses the witness data to
-reconstruct a partial trie and uses this a storage when executing the block.
+reconstruct a partial trie and uses this as storage when executing the block.
 
 The outgoing messages are also collected at block production. These are messages from the Parachain
 the block is built for to other Parachains or to the relay chain itself.
diff --git a/substrate/client/network/README.md b/substrate/client/network/README.md
index 9903109d847..4336bb78533 100644
--- a/substrate/client/network/README.md
+++ b/substrate/client/network/README.md
@@ -245,7 +245,7 @@ only downloads finalized authority set changes.
 GRANDPA keeps justifications for each finalized authority set change. Each change is signed by the
 authorities from the previous set. By downloading and verifying these signed hand-offs starting from genesis,
 we arrive at a recent header faster than downloading full header chain. Each `WarpSyncRequest` contains a block
-hash to a to start collecting proofs from. `WarpSyncResponse` contains a sequence of block headers and
+hash to start collecting proofs from. `WarpSyncResponse` contains a sequence of block headers and
 justifications. The proof downloader checks the justifications and continues requesting proofs from the last
 header hash, until it arrives at some recent header.
 
diff --git a/substrate/frame/bounties/README.md b/substrate/frame/bounties/README.md
index 232334cb1ed..2293ae161e2 100644
--- a/substrate/frame/bounties/README.md
+++ b/substrate/frame/bounties/README.md
@@ -19,7 +19,7 @@ curator or once the bounty is active or payout is pending, resulting in the slas
 curator's deposit.
 
 This pallet may opt into using a [`ChildBountyManager`] that enables bounties to be split into
-sub-bounties, as children of anh established bounty (called the parent in the context of it's
+sub-bounties, as children of an established bounty (called the parent in the context of it's
 children).
 
 > NOTE: The parent bounty cannot be closed if it has a non-zero number of it has active child
diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs
index d9accc5061c..9b6e3c06e91 100644
--- a/substrate/frame/bounties/src/lib.rs
+++ b/substrate/frame/bounties/src/lib.rs
@@ -36,7 +36,7 @@
 //! curator's deposit.
 //!
 //! This pallet may opt into using a [`ChildBountyManager`] that enables bounties to be split into
-//! sub-bounties, as children of anh established bounty (called the parent in the context of it's
+//! sub-bounties, as children of an established bounty (called the parent in the context of it's
 //! children).
 //!
 //! > NOTE: The parent bounty cannot be closed if it has a non-zero number of it has active child
-- 
GitLab


From c139739868eddbda495d642219a57602f63c18f5 Mon Sep 17 00:00:00 2001
From: Jeeyong Um <conr2d@proton.me>
Date: Tue, 7 Jan 2025 15:57:06 +0800
Subject: [PATCH 025/116] Remove usage of `sp-std` from Substrate (#7043)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Description

This PR removes usage of deprecated `sp-std` from Substrate. (following
PR of #5010)

## Integration

This PR doesn't remove re-exported `sp_std` from any crates yet, so
downstream projects using re-exported `sp_std` will not be affected.

## Review Notes

The existing code using `sp-std` is refactored to use `alloc` and `core`
directly. The key-value maps are instantiated from a vector of tuples
directly instead of using the `sp_std::map!` macro.

`sp_std::Writer` is a helper type to use `Vec<u8>` with
the `core::fmt::Write` trait. This PR copied it into `sp-runtime`, because
all crates using `sp_std::Writer` (including `sp-runtime` itself,
`frame-support`, etc.) depend on `sp-runtime`.

If this PR is merged, I will write follow-up PRs to remove the remaining
usage of `sp-std` from `bridges` and `cumulus`.

---------

Co-authored-by: command-bot <>
Co-authored-by: Guillaume Thiolliere <guillaume.thiolliere@parity.io>
Co-authored-by: Bastian Köcher <info@kchr.de>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 Cargo.lock                                    | 11 ----
 prdoc/pr_7043.prdoc                           | 51 +++++++++++++++++++
 substrate/client/sysinfo/Cargo.toml           |  1 -
 .../frame/bags-list/remote-tests/Cargo.toml   |  1 -
 substrate/frame/contracts/Cargo.toml          |  2 -
 .../frame/contracts/proc-macro/src/lib.rs     |  7 ++-
 .../frame/contracts/src/transient_storage.rs  |  4 +-
 .../test-staking-e2e/Cargo.toml               |  1 -
 .../frame/nft-fractionalization/Cargo.toml    |  1 -
 .../test-delegate-stake/Cargo.toml            |  1 -
 .../test-transfer-stake/Cargo.toml            |  1 -
 substrate/frame/revive/Cargo.toml             |  2 -
 substrate/frame/revive/proc-macro/src/lib.rs  |  7 ++-
 .../frame/revive/src/transient_storage.rs     |  4 +-
 substrate/frame/root-offences/Cargo.toml      |  1 -
 .../procedural/src/pallet/expand/config.rs    |  6 +--
 substrate/frame/support/src/lib.rs            |  7 +--
 substrate/frame/system/Cargo.toml             |  2 -
 substrate/frame/system/src/lib.rs             | 16 +++---
 substrate/frame/uniques/Cargo.toml            |  1 -
 .../src/generic/unchecked_extrinsic.rs        |  2 +-
 .../runtime/src/proving_trie/base16.rs        |  4 +-
 .../runtime/src/proving_trie/base2.rs         |  4 +-
 .../runtime/src/proving_trie/mod.rs           |  2 +-
 .../primitives/runtime/src/runtime_logger.rs  |  6 +--
 .../primitives/runtime/src/traits/mod.rs      |  2 +-
 .../src/traits/transaction_extension/mod.rs   | 11 ++--
 27 files changed, 92 insertions(+), 66 deletions(-)
 create mode 100644 prdoc/pr_7043.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index b0fb0586be3..ef0eb9f7e3d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7716,7 +7716,6 @@ dependencies = [
  "sp-externalities 0.25.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
  "sp-version 29.0.0",
  "sp-weights 27.0.0",
  "substrate-test-runtime-client",
@@ -12371,7 +12370,6 @@ dependencies = [
  "pallet-staking 28.0.0",
  "sp-core 28.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
  "sp-storage 19.0.0",
  "sp-tracing 16.0.0",
 ]
@@ -12967,7 +12965,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-keystore 0.34.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
  "staging-xcm 7.0.0",
  "staging-xcm-builder 7.0.0",
@@ -13352,7 +13349,6 @@ dependencies = [
  "sp-npos-elections 26.0.0",
  "sp-runtime 31.0.1",
  "sp-staking 26.0.0",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
 ]
 
@@ -14157,7 +14153,6 @@ dependencies = [
  "sp-core 28.0.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
 ]
 
 [[package]]
@@ -14432,7 +14427,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
  "sp-staking 26.0.0",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
 ]
 
@@ -14456,7 +14450,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
  "sp-staking 26.0.0",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
 ]
 
@@ -14870,7 +14863,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-keystore 0.34.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
  "sp-tracing 16.0.0",
  "staging-xcm 7.0.0",
  "staging-xcm-builder 7.0.0",
@@ -15095,7 +15087,6 @@ dependencies = [
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
  "sp-staking 26.0.0",
- "sp-std 14.0.0",
 ]
 
 [[package]]
@@ -15941,7 +15932,6 @@ dependencies = [
  "sp-core 28.0.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
 ]
 
 [[package]]
@@ -23681,7 +23671,6 @@ dependencies = [
  "sp-crypto-hashing 0.1.0",
  "sp-io 30.0.0",
  "sp-runtime 31.0.1",
- "sp-std 14.0.0",
 ]
 
 [[package]]
diff --git a/prdoc/pr_7043.prdoc b/prdoc/pr_7043.prdoc
new file mode 100644
index 00000000000..d7f6cd6907c
--- /dev/null
+++ b/prdoc/pr_7043.prdoc
@@ -0,0 +1,51 @@
+title: Remove usage of `sp-std` from Substrate
+doc:
+- audience: Runtime Dev
+  description: |-
+    # Description
+
+    This PR removes usage of deprecated `sp-std` from Substrate. (following PR of #5010)
+
+    ## Integration
+
+    This PR doesn't remove re-exported `sp_std` from any crates yet, so downstream projects using re-exported `sp_std` will not be affected.
+
+    ## Review Notes
+
+    The existing code using `sp-std` is refactored to use `alloc` and `core` directly. The key-value maps are instantiated from an array of tuples directly instead of using `sp_std::map!` macro.
+
+    This PR replaces `sp_std::Writer`, a helper type for using `Vec<u8>` with `core::fmt::Write` trait, with `alloc::string::String`.
+
+crates:
+- name: pallet-contracts
+  bump: patch
+- name: pallet-revive
+  bump: patch
+- name: sp-runtime
+  bump: patch
+- name: frame-support-procedural
+  bump: patch
+- name: frame-system
+  bump: patch
+- name: pallet-contracts-proc-macro
+  bump: patch
+- name: pallet-revive-proc-macro
+  bump: patch
+- name: frame-support
+  bump: patch
+- name: sc-sysinfo
+  bump: patch
+- name: pallet-bags-list-remote-tests
+  bump: patch
+- name: pallet-election-provider-e2e-test
+  bump: patch
+- name: pallet-nft-fractionalization
+  bump: patch
+- name: pallet-nomination-pools-test-delegate-stake
+  bump: patch
+- name: pallet-nomination-pools-test-transfer-stake
+  bump: patch
+- name: pallet-root-offences
+  bump: patch
+- name: pallet-uniques
+  bump: patch
diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml
index c7eed77eda7..afc464c3588 100644
--- a/substrate/client/sysinfo/Cargo.toml
+++ b/substrate/client/sysinfo/Cargo.toml
@@ -30,7 +30,6 @@ serde_json = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
 sp-crypto-hashing = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 [dev-dependencies]
 sp-runtime = { workspace = true, default-features = true }
diff --git a/substrate/frame/bags-list/remote-tests/Cargo.toml b/substrate/frame/bags-list/remote-tests/Cargo.toml
index 99b203e73fb..e3215803a02 100644
--- a/substrate/frame/bags-list/remote-tests/Cargo.toml
+++ b/substrate/frame/bags-list/remote-tests/Cargo.toml
@@ -26,7 +26,6 @@ pallet-staking = { workspace = true, default-features = true }
 # core
 sp-core = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 sp-storage = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 
diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml
index e39128639e3..5784e6dd155 100644
--- a/substrate/frame/contracts/Cargo.toml
+++ b/substrate/frame/contracts/Cargo.toml
@@ -50,7 +50,6 @@ sp-api = { workspace = true }
 sp-core = { workspace = true }
 sp-io = { workspace = true }
 sp-runtime = { workspace = true }
-sp-std = { workspace = true }
 
 xcm = { workspace = true }
 xcm-builder = { workspace = true }
@@ -98,7 +97,6 @@ std = [
 	"sp-io/std",
 	"sp-keystore/std",
 	"sp-runtime/std",
-	"sp-std/std",
 	"wasm-instrument?/std",
 	"wasmi/std",
 	"xcm-builder/std",
diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs
index 4aba1d24dbd..5c3c34e6ef3 100644
--- a/substrate/frame/contracts/proc-macro/src/lib.rs
+++ b/substrate/frame/contracts/proc-macro/src/lib.rs
@@ -650,10 +650,9 @@ fn expand_functions(def: &EnvDef, expand_mode: ExpandMode) -> TokenStream2 {
 				let result = #body;
 				if ::log::log_enabled!(target: "runtime::contracts::strace", ::log::Level::Trace) {
 						use core::fmt::Write;
-						let mut w = sp_std::Writer::default();
-						let _ = core::write!(&mut w, #trace_fmt_str, #( #trace_fmt_args, )* result);
-						let msg = core::str::from_utf8(&w.inner()).unwrap_or_default();
-						ctx.ext().append_debug_buffer(msg);
+						let mut msg = alloc::string::String::default();
+						let _ = core::write!(&mut msg, #trace_fmt_str, #( #trace_fmt_args, )* result);
+						ctx.ext().append_debug_buffer(&msg);
 				}
 				result
 			}
diff --git a/substrate/frame/contracts/src/transient_storage.rs b/substrate/frame/contracts/src/transient_storage.rs
index c795a966385..c9b1dac1ad7 100644
--- a/substrate/frame/contracts/src/transient_storage.rs
+++ b/substrate/frame/contracts/src/transient_storage.rs
@@ -22,11 +22,11 @@ use crate::{
 	storage::WriteOutcome,
 	Config, Error,
 };
+use alloc::{collections::BTreeMap, vec::Vec};
 use codec::Encode;
-use core::marker::PhantomData;
+use core::{marker::PhantomData, mem};
 use frame_support::DefaultNoBound;
 use sp_runtime::{DispatchError, DispatchResult, Saturating};
-use sp_std::{collections::btree_map::BTreeMap, mem, vec::Vec};
 
 /// Meter entry tracks transaction allocations.
 #[derive(Default, Debug)]
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
index 5009d3d54d5..7a48ae868a5 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
@@ -26,7 +26,6 @@ sp-io = { workspace = true, default-features = true }
 sp-npos-elections = { workspace = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-staking = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 
 frame-election-provider-support = { workspace = true, default-features = true }
diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml
index 7f6df86ed0e..23537b22789 100644
--- a/substrate/frame/nft-fractionalization/Cargo.toml
+++ b/substrate/frame/nft-fractionalization/Cargo.toml
@@ -30,7 +30,6 @@ sp-runtime = { workspace = true }
 pallet-balances = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
index fe3743d7e5d..62c2fb625fc 100644
--- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
+++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml
@@ -23,7 +23,6 @@ sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-staking = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 frame-election-provider-support = { workspace = true, default-features = true }
 frame-support = { features = ["experimental"], workspace = true, default-features = true }
diff --git a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
index 2cdc4c41a08..0b21d5f4e8c 100644
--- a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
+++ b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
@@ -23,7 +23,6 @@ sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-staking = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 frame-election-provider-support = { workspace = true, default-features = true }
 frame-support = { workspace = true, default-features = true }
diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml
index fa008f8e836..1284f5ee894 100644
--- a/substrate/frame/revive/Cargo.toml
+++ b/substrate/frame/revive/Cargo.toml
@@ -46,7 +46,6 @@ sp-arithmetic = { workspace = true }
 sp-core = { workspace = true }
 sp-io = { workspace = true }
 sp-runtime = { workspace = true }
-sp-std = { workspace = true }
 subxt-signer = { workspace = true, optional = true, features = [
 	"unstable-eth",
 ] }
@@ -99,7 +98,6 @@ std = [
 	"sp-io/std",
 	"sp-keystore/std",
 	"sp-runtime/std",
-	"sp-std/std",
 	"subxt-signer",
 	"xcm-builder/std",
 	"xcm/std",
diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs
index b6ea1a06d94..b09bdef1463 100644
--- a/substrate/frame/revive/proc-macro/src/lib.rs
+++ b/substrate/frame/revive/proc-macro/src/lib.rs
@@ -512,10 +512,9 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 {
 				let result = (|| #body)();
 				if ::log::log_enabled!(target: "runtime::revive::strace", ::log::Level::Trace) {
 						use core::fmt::Write;
-						let mut w = sp_std::Writer::default();
-						let _ = core::write!(&mut w, #trace_fmt_str, #( #trace_fmt_args, )* result);
-						let msg = core::str::from_utf8(&w.inner()).unwrap_or_default();
-						self.ext().append_debug_buffer(msg);
+						let mut msg = alloc::string::String::default();
+						let _ = core::write!(&mut msg, #trace_fmt_str, #( #trace_fmt_args, )* result);
+						self.ext().append_debug_buffer(&msg);
 				}
 				result
 			}
diff --git a/substrate/frame/revive/src/transient_storage.rs b/substrate/frame/revive/src/transient_storage.rs
index 298e0296fe6..d88adc43735 100644
--- a/substrate/frame/revive/src/transient_storage.rs
+++ b/substrate/frame/revive/src/transient_storage.rs
@@ -22,11 +22,11 @@ use crate::{
 	storage::WriteOutcome,
 	Config, Error,
 };
+use alloc::{collections::BTreeMap, vec::Vec};
 use codec::Encode;
-use core::marker::PhantomData;
+use core::{marker::PhantomData, mem};
 use frame_support::DefaultNoBound;
 use sp_runtime::{DispatchError, DispatchResult, Saturating};
-use sp_std::{collections::btree_map::BTreeMap, mem, vec::Vec};
 
 /// Meter entry tracks transaction allocations.
 #[derive(Default, Debug)]
diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml
index dedde9956b6..c539f1dc4dc 100644
--- a/substrate/frame/root-offences/Cargo.toml
+++ b/substrate/frame/root-offences/Cargo.toml
@@ -34,7 +34,6 @@ pallet-timestamp = { workspace = true, default-features = true }
 
 sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true }
-sp-std = { workspace = true, default-features = true }
 
 frame-election-provider-support = { workspace = true, default-features = true }
 
diff --git a/substrate/frame/support/procedural/src/pallet/expand/config.rs b/substrate/frame/support/procedural/src/pallet/expand/config.rs
index 0a583f1359b..d39f2767236 100644
--- a/substrate/frame/support/procedural/src/pallet/expand/config.rs
+++ b/substrate/frame/support/procedural/src/pallet/expand/config.rs
@@ -126,7 +126,7 @@ pub fn expand_config_metadata(def: &Def) -> proc_macro2::TokenStream {
 				ty: #frame_support::__private::scale_info::meta_type::<
 						<T as Config #trait_use_gen>::#ident
 					>(),
-				docs: #frame_support::__private::sp_std::vec![ #( #doc ),* ],
+				docs: #frame_support::__private::vec![ #( #doc ),* ],
 			}
 		})
 	});
@@ -136,9 +136,9 @@ pub fn expand_config_metadata(def: &Def) -> proc_macro2::TokenStream {
 
 			#[doc(hidden)]
 			pub fn pallet_associated_types_metadata()
-				-> #frame_support::__private::sp_std::vec::Vec<#frame_support::__private::metadata_ir::PalletAssociatedTypeMetadataIR>
+				-> #frame_support::__private::vec::Vec<#frame_support::__private::metadata_ir::PalletAssociatedTypeMetadataIR>
 			{
-				#frame_support::__private::sp_std::vec![ #( #types ),* ]
+				#frame_support::__private::vec![ #( #types ),* ]
 			}
 		}
 	)
diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs
index c64987b17d3..a6969260e6a 100644
--- a/substrate/frame/support/src/lib.rs
+++ b/substrate/frame/support/src/lib.rs
@@ -44,6 +44,7 @@ pub mod __private {
 	pub use alloc::{
 		boxed::Box,
 		rc::Rc,
+		string::String,
 		vec,
 		vec::{IntoIter, Vec},
 	};
@@ -502,9 +503,9 @@ macro_rules! runtime_print {
 	($($arg:tt)+) => {
 		{
 			use core::fmt::Write;
-			let mut w = $crate::__private::sp_std::Writer::default();
-			let _ = core::write!(&mut w, $($arg)+);
-			$crate::__private::sp_io::misc::print_utf8(&w.inner())
+			let mut msg = $crate::__private::String::default();
+			let _ = core::write!(&mut msg, $($arg)+);
+			$crate::__private::sp_io::misc::print_utf8(msg.as_bytes())
 		}
 	}
 }
diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml
index 1340b2c55c5..8883ebd4c41 100644
--- a/substrate/frame/system/Cargo.toml
+++ b/substrate/frame/system/Cargo.toml
@@ -26,7 +26,6 @@ serde = { features = ["alloc", "derive"], workspace = true }
 sp-core = { features = ["serde"], workspace = true }
 sp-io = { workspace = true }
 sp-runtime = { features = ["serde"], workspace = true }
-sp-std = { workspace = true }
 sp-version = { features = ["serde"], workspace = true }
 sp-weights = { features = ["serde"], workspace = true }
 
@@ -47,7 +46,6 @@ std = [
 	"sp-externalities/std",
 	"sp-io/std",
 	"sp-runtime/std",
-	"sp-std/std",
 	"sp-version/std",
 	"sp-weights/std",
 ]
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 4fc69c8755f..894e1898ed1 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -120,8 +120,6 @@ use sp_runtime::{
 	},
 	DispatchError, RuntimeDebug,
 };
-#[cfg(any(feature = "std", test))]
-use sp_std::map;
 use sp_version::RuntimeVersion;
 
 use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen};
@@ -1920,12 +1918,14 @@ impl<T: Config> Pallet<T> {
 	#[cfg(any(feature = "std", test))]
 	pub fn externalities() -> TestExternalities {
 		TestExternalities::new(sp_core::storage::Storage {
-			top: map![
-				<BlockHash<T>>::hashed_key_for(BlockNumberFor::<T>::zero()) => [69u8; 32].encode(),
-				<Number<T>>::hashed_key().to_vec() => BlockNumberFor::<T>::one().encode(),
-				<ParentHash<T>>::hashed_key().to_vec() => [69u8; 32].encode()
-			],
-			children_default: map![],
+			top: [
+				(<BlockHash<T>>::hashed_key_for(BlockNumberFor::<T>::zero()), [69u8; 32].encode()),
+				(<Number<T>>::hashed_key().to_vec(), BlockNumberFor::<T>::one().encode()),
+				(<ParentHash<T>>::hashed_key().to_vec(), [69u8; 32].encode()),
+			]
+			.into_iter()
+			.collect(),
+			children_default: Default::default(),
 		})
 	}
 
diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml
index 135292fb4ec..a2473c51ee7 100644
--- a/substrate/frame/uniques/Cargo.toml
+++ b/substrate/frame/uniques/Cargo.toml
@@ -28,7 +28,6 @@ sp-runtime = { workspace = true }
 pallet-balances = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
-sp-std = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
index d8510a60a78..6b8471f8484 100644
--- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
+++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs
@@ -683,7 +683,7 @@ mod legacy {
 		Extra: Encode,
 	{
 		fn encode(&self) -> Vec<u8> {
-			let mut tmp = Vec::with_capacity(sp_std::mem::size_of::<Self>());
+			let mut tmp = Vec::with_capacity(core::mem::size_of::<Self>());
 
 			// 1 byte version id.
 			match self.signature.as_ref() {
diff --git a/substrate/primitives/runtime/src/proving_trie/base16.rs b/substrate/primitives/runtime/src/proving_trie/base16.rs
index da05c551c6d..abdf6ed178b 100644
--- a/substrate/primitives/runtime/src/proving_trie/base16.rs
+++ b/substrate/primitives/runtime/src/proving_trie/base16.rs
@@ -26,8 +26,8 @@
 
 use super::{ProofToHashes, ProvingTrie, TrieError};
 use crate::{Decode, DispatchError, Encode};
+use alloc::vec::Vec;
 use codec::MaxEncodedLen;
-use sp_std::vec::Vec;
 use sp_trie::{
 	trie_types::{TrieDBBuilder, TrieDBMutBuilderV1},
 	LayoutV1, MemoryDB, Trie, TrieMut,
@@ -197,7 +197,7 @@ mod tests {
 	use super::*;
 	use crate::traits::BlakeTwo256;
 	use sp_core::H256;
-	use sp_std::collections::btree_map::BTreeMap;
+	use std::collections::BTreeMap;
 
 	// A trie which simulates a trie of accounts (u32) and balances (u128).
 	type BalanceTrie = BasicProvingTrie<BlakeTwo256, u32, u128>;
diff --git a/substrate/primitives/runtime/src/proving_trie/base2.rs b/substrate/primitives/runtime/src/proving_trie/base2.rs
index 2b14a59ab05..8a7cfaa5149 100644
--- a/substrate/primitives/runtime/src/proving_trie/base2.rs
+++ b/substrate/primitives/runtime/src/proving_trie/base2.rs
@@ -22,9 +22,9 @@
 
 use super::{ProofToHashes, ProvingTrie, TrieError};
 use crate::{Decode, DispatchError, Encode};
+use alloc::{collections::BTreeMap, vec::Vec};
 use binary_merkle_tree::{merkle_proof, merkle_root, MerkleProof};
 use codec::MaxEncodedLen;
-use sp_std::{collections::btree_map::BTreeMap, vec::Vec};
 
 /// A helper structure for building a basic base-2 merkle trie and creating compact proofs for that
 /// trie.
@@ -161,7 +161,7 @@ mod tests {
 	use super::*;
 	use crate::traits::BlakeTwo256;
 	use sp_core::H256;
-	use sp_std::collections::btree_map::BTreeMap;
+	use std::collections::BTreeMap;
 
 	// A trie which simulates a trie of accounts (u32) and balances (u128).
 	type BalanceTrie = BasicProvingTrie<BlakeTwo256, u32, u128>;
diff --git a/substrate/primitives/runtime/src/proving_trie/mod.rs b/substrate/primitives/runtime/src/proving_trie/mod.rs
index 009aa6d4935..32b2284b4d7 100644
--- a/substrate/primitives/runtime/src/proving_trie/mod.rs
+++ b/substrate/primitives/runtime/src/proving_trie/mod.rs
@@ -23,7 +23,7 @@ pub mod base2;
 use crate::{Decode, DispatchError, Encode, MaxEncodedLen, TypeInfo};
 #[cfg(feature = "serde")]
 use crate::{Deserialize, Serialize};
-use sp_std::vec::Vec;
+use alloc::vec::Vec;
 use sp_trie::{trie_types::TrieError as SpTrieError, VerifyError};
 
 /// A runtime friendly error type for tries.
diff --git a/substrate/primitives/runtime/src/runtime_logger.rs b/substrate/primitives/runtime/src/runtime_logger.rs
index 79984b13567..ec5251d978f 100644
--- a/substrate/primitives/runtime/src/runtime_logger.rs
+++ b/substrate/primitives/runtime/src/runtime_logger.rs
@@ -54,10 +54,10 @@ impl log::Log for RuntimeLogger {
 
 	fn log(&self, record: &log::Record) {
 		use core::fmt::Write;
-		let mut w = sp_std::Writer::default();
-		let _ = ::core::write!(&mut w, "{}", record.args());
+		let mut msg = alloc::string::String::default();
+		let _ = ::core::write!(&mut msg, "{}", record.args());
 
-		sp_io::logging::log(record.level().into(), record.target(), w.inner());
+		sp_io::logging::log(record.level().into(), record.target(), msg.as_bytes());
 	}
 
 	fn flush(&self) {}
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index d371152dc40..5b6cacc7e00 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -1710,7 +1710,7 @@ pub trait SignedExtension:
 	/// This method provides a default implementation that returns a vec containing a single
 	/// [`TransactionExtensionMetadata`].
 	fn metadata() -> Vec<TransactionExtensionMetadata> {
-		sp_std::vec![TransactionExtensionMetadata {
+		alloc::vec![TransactionExtensionMetadata {
 			identifier: Self::IDENTIFIER,
 			ty: scale_info::meta_type::<Self>(),
 			implicit: scale_info::meta_type::<Self::AdditionalSigned>()
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
index 4d95e5e6f3a..15be1e4c8e0 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
@@ -24,11 +24,12 @@ use crate::{
 	},
 	DispatchResult,
 };
+use alloc::vec::Vec;
 use codec::{Codec, Decode, Encode};
-use impl_trait_for_tuples::impl_for_tuples;
+use core::fmt::Debug;
 #[doc(hidden)]
-pub use sp_std::marker::PhantomData;
-use sp_std::{self, fmt::Debug, prelude::*};
+pub use core::marker::PhantomData;
+use impl_trait_for_tuples::impl_for_tuples;
 use sp_weights::Weight;
 use tuplex::{PopFront, PushBack};
 
@@ -258,7 +259,7 @@ pub trait TransactionExtension<Call: Dispatchable>:
 	/// This method provides a default implementation that returns a vec containing a single
 	/// [`TransactionExtensionMetadata`].
 	fn metadata() -> Vec<TransactionExtensionMetadata> {
-		sp_std::vec![TransactionExtensionMetadata {
+		alloc::vec![TransactionExtensionMetadata {
 			identifier: Self::IDENTIFIER,
 			ty: scale_info::meta_type::<Self>(),
 			implicit: scale_info::meta_type::<Self::Implicit>()
@@ -668,7 +669,7 @@ impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
 impl<Call: Dispatchable> TransactionExtension<Call> for () {
 	const IDENTIFIER: &'static str = "UnitTransactionExtension";
 	type Implicit = ();
-	fn implicit(&self) -> sp_std::result::Result<Self::Implicit, TransactionValidityError> {
+	fn implicit(&self) -> core::result::Result<Self::Implicit, TransactionValidityError> {
 		Ok(())
 	}
 	type Val = ();
-- 
GitLab


From 1059be75c36634dff26a9b8711447a0c66926582 Mon Sep 17 00:00:00 2001
From: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
Date: Tue, 7 Jan 2025 11:14:13 +0200
Subject: [PATCH 026/116] workflows: add debug input for sync templates act
 (#7057)

# Description

Introduce a workflow `debug` input for `misc-sync-templates.yml` and use
it instead of the `runner.debug` context variable, which is set to '1'
when `ACTIONS_RUNNER_DEBUG` env/secret is set
(https://docs.github.com/en/actions/monitoring-and-troubleshooting-workflows/troubleshooting-workflows/enabling-debug-logging#enabling-runner-diagnostic-logging).
This is useful for controlling when to show debug prints.

## Integration

N/A

## Review Notes

Using `runner.debug` requires setting the `ACTIONS_RUNNER_DEBUG` env
variable, but setting it to false/true is doable through an input, or by
importing a variable from the GitHub env file (which requires a code
change).
`ACTIONS_RUNNER_DEBUG` setup, which simplifies debug printing, but it
doesn't look as standard as `runner.debug`. I don't think it is a big
deal overall, for this action alone, but happy to account for other
opinions.

Note: setting the `ACTIONS_RUNNER_DEBUG` env variable whenever we want in a
separate branch wouldn't be useful, because we cannot run the
`misc-sync-templates.yml` action from any branch other than `master` (due to
branch protection rules), so we need to expose this input so it can be
controlled from `master`.

---------

Signed-off-by: Iulian Barbu <iulian.barbu@parity.io>
---
 .github/workflows/misc-sync-templates.yml | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml
index 7ff0705fe24..8d06d89621d 100644
--- a/.github/workflows/misc-sync-templates.yml
+++ b/.github/workflows/misc-sync-templates.yml
@@ -21,6 +21,10 @@ on:
       stable_release_branch:
         description: 'Stable release branch, e.g. stable2407'
         required: true
+      debug:
+        description: Enable runner debug logging
+        required: false
+        default: false
 
 jobs:
   sync-templates:
@@ -86,7 +90,7 @@ jobs:
           EOF
 
           [ ${{ matrix.template }} != "solochain" ] && echo "# Leave out the node compilation from regular template usage." \
-            && echo "\"default-members\" = [\"pallets/template\", \"runtime\"]" >> Cargo.toml
+            && echo "default-members = [\"pallets/template\", \"runtime\"]" >> Cargo.toml
           [ ${{ matrix.template }} == "solochain" ] && echo "# The node isn't yet replaceable by Omni Node."
           cat << EOF >> Cargo.toml
           members = [
@@ -115,8 +119,9 @@ jobs:
           toml set templates/${{ matrix.template }}/Cargo.toml 'workspace.package.edition' "$(toml get --raw Cargo.toml 'workspace.package.edition')" > Cargo.temp
           mv Cargo.temp ./templates/${{ matrix.template }}/Cargo.toml
         working-directory: polkadot-sdk
+
       - name: Print the result Cargo.tomls for debugging
-        if: runner.debug == '1'
+        if: ${{ github.event.inputs.debug }}
         run: find . -type f -name 'Cargo.toml' -exec cat {} \;
         working-directory: polkadot-sdk/templates/${{ matrix.template }}/
 
@@ -142,6 +147,12 @@ jobs:
           done;
         working-directory: "${{ env.template-path }}"
 
+      - name: Print the result Cargo.tomls for debugging after copying required workspace dependencies
+        if: ${{ github.event.inputs.debug }}
+        run: find . -type f -name 'Cargo.toml' -exec cat {} \;
+        working-directory: polkadot-sdk/templates/${{ matrix.template }}/
+
+
       # 3. Verify the build. Push the changes or create a PR.
 
       # We've run into out-of-disk error when compiling in the next step, so we free up some space this way.
-- 
GitLab


From d2c157a467f8dd72b86da0b2070d960d5dcad60d Mon Sep 17 00:00:00 2001
From: Utkarsh Bhardwaj <ub2262000@gmail.com>
Date: Tue, 7 Jan 2025 12:39:14 +0000
Subject: [PATCH 027/116] migrate pallet-node-authorization to use umbrella
 crate (#7040)

# Description

Migrate pallet-node-authorization to use umbrella crate. Part of #6504

## Review Notes

* This PR migrates pallet-node-authorization to use the umbrella crate.
* Some imports like below have not been added to any prelude as they
have very limited usage across the various pallets.
```rust
use sp_core::OpaquePeerId as PeerId;
```
* Added a commonly used runtime trait for testing in the
`testing_prelude` in `substrate/frame/src/lib.rs`:
```rust
pub use sp_runtime::traits::BadOrigin;
```
* `weights.rs` uses the `weights_prelude` like:
```rust
use frame::weights_prelude::*;
```
* `tests.rs` and `mock.rs` use the `testing_prelude`:
```rust
use frame::testing_prelude::*;
```
* `lib.rs` uses the main `prelude` like:
```rust
use frame::prelude::*;
```
* For testing: Checked that local build works and tests run
successfully.
---
 Cargo.lock                                       |  6 +-----
 prdoc/pr_7040.prdoc                              | 16 ++++++++++++++++
 substrate/frame/node-authorization/Cargo.toml    | 16 +++-------------
 substrate/frame/node-authorization/src/lib.rs    | 14 +++++++-------
 substrate/frame/node-authorization/src/mock.rs   |  8 +++-----
 substrate/frame/node-authorization/src/tests.rs  |  3 +--
 .../frame/node-authorization/src/weights.rs      |  3 +--
 substrate/frame/src/lib.rs                       |  3 +++
 8 files changed, 35 insertions(+), 34 deletions(-)
 create mode 100644 prdoc/pr_7040.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index ef0eb9f7e3d..cc34514aeb9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14264,14 +14264,10 @@ dependencies = [
 name = "pallet-node-authorization"
 version = "28.0.0"
 dependencies = [
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "log",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "scale-info",
- "sp-core 28.0.0",
- "sp-io 30.0.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]
diff --git a/prdoc/pr_7040.prdoc b/prdoc/pr_7040.prdoc
new file mode 100644
index 00000000000..f88e96a7037
--- /dev/null
+++ b/prdoc/pr_7040.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[pallet-node-authorization] Migrate to using frame umbrella crate'
+
+doc:
+  - audience: Runtime Dev
+    description: This PR migrates the pallet-node-authorization to use the frame umbrella crate. This
+      is part of the ongoing effort to migrate all pallets to use the frame umbrella crate.
+      The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504).
+
+crates:
+  - name: pallet-node-authorization
+    bump: minor
+  - name: polkadot-sdk-frame
+    bump: minor
diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml
index 17473649393..7e55ad17809 100644
--- a/substrate/frame/node-authorization/Cargo.toml
+++ b/substrate/frame/node-authorization/Cargo.toml
@@ -16,28 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["experimental", "runtime"] }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
-sp-core = { workspace = true }
-sp-io = { workspace = true }
-sp-runtime = { workspace = true }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"log/std",
 	"scale-info/std",
-	"sp-core/std",
-	"sp-io/std",
-	"sp-runtime/std",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
-	"sp-runtime/try-runtime",
+	"frame/try-runtime",
 ]
diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs
index 7682b54ea0f..3cec0d3bcb6 100644
--- a/substrate/frame/node-authorization/src/lib.rs
+++ b/substrate/frame/node-authorization/src/lib.rs
@@ -47,18 +47,18 @@ pub mod weights;
 extern crate alloc;
 
 use alloc::{collections::btree_set::BTreeSet, vec::Vec};
+use frame::{
+	deps::{sp_core::OpaquePeerId as PeerId, sp_io},
+	prelude::*,
+};
 pub use pallet::*;
-use sp_core::OpaquePeerId as PeerId;
-use sp_runtime::traits::StaticLookup;
 pub use weights::WeightInfo;
 
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 
-#[frame_support::pallet]
+#[frame::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
-	use frame_system::pallet_prelude::*;
 
 	#[pallet::pallet]
 	#[pallet::without_storage_info]
@@ -111,7 +111,7 @@ pub mod pallet {
 		StorageMap<_, Blake2_128Concat, PeerId, BTreeSet<PeerId>, ValueQuery>;
 
 	#[pallet::genesis_config]
-	#[derive(frame_support::DefaultNoBound)]
+	#[derive(DefaultNoBound)]
 	pub struct GenesisConfig<T: Config> {
 		pub nodes: Vec<(PeerId, T::AccountId)>,
 	}
@@ -171,7 +171,7 @@ pub mod pallet {
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		/// Set reserved node every block. It may not be enabled depends on the offchain
 		/// worker settings when starting the node.
-		fn offchain_worker(now: frame_system::pallet_prelude::BlockNumberFor<T>) {
+		fn offchain_worker(now: BlockNumberFor<T>) {
 			let network_state = sp_io::offchain::network_state();
 			match network_state {
 				Err(_) => log::error!(
diff --git a/substrate/frame/node-authorization/src/mock.rs b/substrate/frame/node-authorization/src/mock.rs
index 656d2bfa39a..c6665a479e1 100644
--- a/substrate/frame/node-authorization/src/mock.rs
+++ b/substrate/frame/node-authorization/src/mock.rs
@@ -20,13 +20,11 @@
 use super::*;
 use crate as pallet_node_authorization;
 
-use frame_support::{derive_impl, ord_parameter_types, traits::ConstU32};
-use frame_system::EnsureSignedBy;
-use sp_runtime::BuildStorage;
+use frame::testing_prelude::*;
 
 type Block = frame_system::mocking::MockBlock<Test>;
 
-frame_support::construct_runtime!(
+construct_runtime!(
 	pub enum Test
 	{
 		System: frame_system,
@@ -61,7 +59,7 @@ pub fn test_node(id: u8) -> PeerId {
 	PeerId(vec![id])
 }
 
-pub fn new_test_ext() -> sp_io::TestExternalities {
+pub fn new_test_ext() -> TestState {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_node_authorization::GenesisConfig::<Test> {
 		nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)],
diff --git a/substrate/frame/node-authorization/src/tests.rs b/substrate/frame/node-authorization/src/tests.rs
index 4704b5adf26..cf60ab6efbd 100644
--- a/substrate/frame/node-authorization/src/tests.rs
+++ b/substrate/frame/node-authorization/src/tests.rs
@@ -19,8 +19,7 @@
 
 use super::*;
 use crate::mock::*;
-use frame_support::{assert_noop, assert_ok};
-use sp_runtime::traits::BadOrigin;
+use frame::testing_prelude::*;
 
 #[test]
 fn add_well_known_node_works() {
diff --git a/substrate/frame/node-authorization/src/weights.rs b/substrate/frame/node-authorization/src/weights.rs
index 881eeaf7a4c..cd2935458b9 100644
--- a/substrate/frame/node-authorization/src/weights.rs
+++ b/substrate/frame/node-authorization/src/weights.rs
@@ -21,8 +21,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
-use core::marker::PhantomData;
+use frame::weights_prelude::*;
 
 pub trait WeightInfo {
 	fn add_well_known_node() -> Weight;
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index b0338b68231..15601ebde1f 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -328,6 +328,9 @@ pub mod testing_prelude {
 	pub use sp_io::TestExternalities;
 
 	pub use sp_io::TestExternalities as TestState;
+
+	/// Commonly used runtime traits for testing.
+	pub use sp_runtime::traits::BadOrigin;
 }
 
 /// All of the types and tools needed to build FRAME-based runtimes.
-- 
GitLab


From be20c65743cefdaa5a3bfb29c2fc668fc57d8925 Mon Sep 17 00:00:00 2001
From: Andrei Eres <eresav@me.com>
Date: Tue, 7 Jan 2025 13:59:23 +0100
Subject: [PATCH 028/116] Implement NetworkRequest for litep2p (#7073)

# Description

Implements `NetworkRequest::request` for litep2p, which is needed for the
networking benchmarks.


## Review Notes

Duplicates the existing implementation from `NetworkService`:

https://github.com/paritytech/polkadot-sdk/blob/5bf9dd2aa9bf944434203128783925bdc2ad8c01/substrate/client/network/src/service.rs#L1186-L1205

---------

Co-authored-by: command-bot <>
---
 prdoc/pr_7073.prdoc                           | 16 ++++++++++++
 .../client/network/src/litep2p/service.rs     | 26 +++++++++++++------
 2 files changed, 34 insertions(+), 8 deletions(-)
 create mode 100644 prdoc/pr_7073.prdoc

diff --git a/prdoc/pr_7073.prdoc b/prdoc/pr_7073.prdoc
new file mode 100644
index 00000000000..3bcd129d031
--- /dev/null
+++ b/prdoc/pr_7073.prdoc
@@ -0,0 +1,16 @@
+title: Implement NetworkRequest for litep2p
+doc:
+- audience: Node Dev
+  description: |-
+    # Description
+
+    Implements NetworkRequest::request for litep2p that we need for networking benchmarks
+
+
+    ## Review Notes
+
+    Duplicates implementation for NetworkService
+    https://github.com/paritytech/polkadot-sdk/blob/5bf9dd2aa9bf944434203128783925bdc2ad8c01/substrate/client/network/src/service.rs#L1186-L1205
+crates:
+- name: sc-network
+  bump: patch
diff --git a/substrate/client/network/src/litep2p/service.rs b/substrate/client/network/src/litep2p/service.rs
index d270e90efdf..2d4a117d156 100644
--- a/substrate/client/network/src/litep2p/service.rs
+++ b/substrate/client/network/src/litep2p/service.rs
@@ -28,8 +28,8 @@ use crate::{
 	peer_store::PeerStoreProvider,
 	service::out_events,
 	Event, IfDisconnected, NetworkDHTProvider, NetworkEventStream, NetworkPeers, NetworkRequest,
-	NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider, ProtocolName,
-	RequestFailure, Signature,
+	NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider, OutboundFailure,
+	ProtocolName, RequestFailure, Signature,
 };
 
 use codec::DecodeAll;
@@ -526,13 +526,23 @@ impl NetworkStateInfo for Litep2pNetworkService {
 impl NetworkRequest for Litep2pNetworkService {
 	async fn request(
 		&self,
-		_target: PeerId,
-		_protocol: ProtocolName,
-		_request: Vec<u8>,
-		_fallback_request: Option<(Vec<u8>, ProtocolName)>,
-		_connect: IfDisconnected,
+		target: PeerId,
+		protocol: ProtocolName,
+		request: Vec<u8>,
+		fallback_request: Option<(Vec<u8>, ProtocolName)>,
+		connect: IfDisconnected,
 	) -> Result<(Vec<u8>, ProtocolName), RequestFailure> {
-		unimplemented!();
+		let (tx, rx) = oneshot::channel();
+
+		self.start_request(target, protocol, request, fallback_request, tx, connect);
+
+		match rx.await {
+			Ok(v) => v,
+			// The channel can only be closed if the network worker no longer exists. If the
+			// network worker no longer exists, then all connections to `target` are necessarily
+			// closed, and we legitimately report this situation as a "ConnectionClosed".
+			Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)),
+		}
 	}
 
 	fn start_request(
-- 
GitLab


From 064f10c495993c3f6bb4e015780e1ffb0dac3732 Mon Sep 17 00:00:00 2001
From: Alin Dima <alin@parity.io>
Date: Tue, 7 Jan 2025 15:28:21 +0200
Subject: [PATCH 029/116] rewrite some flaky zombienet polkadot tests to
 zombienet-sdk (#6757)

Will fix:
https://github.com/paritytech/polkadot-sdk/issues/6574
https://github.com/paritytech/polkadot-sdk/issues/6644
https://github.com/paritytech/polkadot-sdk/issues/6062

---------

Co-authored-by: Javier Viola <javier@parity.io>
---
 .gitlab/pipeline/zombienet/polkadot.yml       | 113 +++--
 Cargo.lock                                    | 418 +++---------------
 Cargo.toml                                    |   2 +-
 polkadot/zombienet-sdk-tests/Cargo.toml       |   1 +
 .../tests/elastic_scaling/basic_3cores.rs     | 135 ++++++
 .../doesnt_break_parachains.rs                | 133 ++++++
 .../tests/elastic_scaling/mod.rs              |   6 +-
 .../elastic_scaling/slot_based_3cores.rs      |  18 +-
 .../async_backing_6_seconds_rate.rs           |  95 ++++
 .../tests/functional/mod.rs                   |   5 +
 .../tests/functional/sync_backing.rs          |  74 ++++
 .../helpers.rs => helpers/mod.rs}             |  29 +-
 polkadot/zombienet-sdk-tests/tests/lib.rs     |   5 +
 .../tests/smoke/coretime_revenue.rs           |  23 +-
 .../0001-basic-3cores-6s-blocks.toml          |  49 --
 .../0001-basic-3cores-6s-blocks.zndsl         |  28 --
 ...astic-scaling-doesnt-break-parachains.toml |  40 --
 ...stic-scaling-doesnt-break-parachains.zndsl |  20 -
 .../elastic_scaling/assign-core.js            |   1 -
 .../0011-async-backing-6-seconds-rate.toml    |  54 ---
 .../0011-async-backing-6-seconds-rate.zndsl   |  20 -
 .../functional/0017-sync-backing.toml         |  48 --
 .../functional/0017-sync-backing.zndsl        |  22 -
 23 files changed, 637 insertions(+), 702 deletions(-)
 create mode 100644 polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs
 create mode 100644 polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
 create mode 100644 polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs
 create mode 100644 polkadot/zombienet-sdk-tests/tests/functional/mod.rs
 create mode 100644 polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs
 rename polkadot/zombienet-sdk-tests/tests/{elastic_scaling/helpers.rs => helpers/mod.rs} (65%)
 delete mode 100644 polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml
 delete mode 100644 polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl
 delete mode 100644 polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml
 delete mode 100644 polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl
 delete mode 120000 polkadot/zombienet_tests/elastic_scaling/assign-core.js
 delete mode 100644 polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml
 delete mode 100644 polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl
 delete mode 100644 polkadot/zombienet_tests/functional/0017-sync-backing.toml
 delete mode 100644 polkadot/zombienet_tests/functional/0017-sync-backing.zndsl

diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 14a235bcda8..878f241317a 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -160,39 +160,6 @@ zombienet-polkadot-functional-0010-validator-disabling:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0010-validator-disabling.zndsl"
 
-.zombienet-polkadot-functional-0011-async-backing-6-seconds-rate:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/functional"
-      --test="0011-async-backing-6-seconds-rate.zndsl"
-
-zombienet-polkadot-elastic-scaling-0001-basic-3cores-6s-blocks:
-  extends:
-    - .zombienet-polkadot-common
-  variables:
-    FORCED_INFRA_INSTANCE: "spot-iops"
-  before_script:
-    - !reference [ .zombienet-polkadot-common, before_script ]
-    - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/elastic_scaling"
-      --test="0001-basic-3cores-6s-blocks.zndsl"
-
-.zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains:
-  extends:
-    - .zombienet-polkadot-common
-  before_script:
-    - !reference [ .zombienet-polkadot-common, before_script ]
-    - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/elastic_scaling
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/elastic_scaling"
-      --test="0002-elastic-scaling-doesnt-break-parachains.zndsl"
-
-
 .zombienet-polkadot-functional-0012-spam-statement-distribution-requests:
   extends:
     - .zombienet-polkadot-common
@@ -236,14 +203,6 @@ zombienet-polkadot-functional-0015-coretime-shared-core:
       --local-dir="${LOCAL_DIR}/functional"
       --test="0016-approval-voting-parallel.zndsl"
 
-.zombienet-polkadot-functional-0017-sync-backing:
-  extends:
-    - .zombienet-polkadot-common
-  script:
-    - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
-      --local-dir="${LOCAL_DIR}/functional"
-      --test="0017-sync-backing.zndsl"
-
 zombienet-polkadot-functional-0018-shared-core-idle-parachain:
   extends:
     - .zombienet-polkadot-common
@@ -386,6 +345,8 @@ zombienet-polkadot-malus-0001-dispute-valid:
       --local-dir="${LOCAL_DIR}/integrationtests"
       --test="0001-dispute-valid-block.zndsl"
 
+# sdk tests
+
 .zombienet-polkadot-coretime-revenue:
   extends:
     - .zombienet-polkadot-common
@@ -411,8 +372,78 @@ zombienet-polkadot-elastic-scaling-slot-based-3cores:
     - !reference [ ".zombienet-polkadot-common", "before_script" ]
     - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
     - export CUMULUS_IMAGE="docker.io/paritypr/test-parachain:${PIPELINE_IMAGE_TAG}"
+    - export X_INFRA_INSTANCE=spot # use spot by default
   script:
     # we want to use `--no-capture` in zombienet tests.
     - unset NEXTEST_FAILURE_OUTPUT
     - unset NEXTEST_SUCCESS_OUTPUT
     - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::slot_based_3cores::slot_based_3cores_test
+
+zombienet-polkadot-elastic-scaling-doesnt-break-parachains:
+  extends:
+    - .zombienet-polkadot-common
+  needs:
+    - job: build-polkadot-zombienet-tests
+      artifacts: true
+  before_script:
+    - !reference [ ".zombienet-polkadot-common", "before_script" ]
+    - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
+    - export X_INFRA_INSTANCE=spot # use spot by default
+  variables:
+    KUBERNETES_CPU_REQUEST: "1"
+  script:
+    # we want to use `--no-capture` in zombienet tests.
+    - unset NEXTEST_FAILURE_OUTPUT
+    - unset NEXTEST_SUCCESS_OUTPUT
+    - RUST_LOG=info,zombienet_=trace cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::doesnt_break_parachains::doesnt_break_parachains_test
+
+zombienet-polkadot-elastic-scaling-basic-3cores:
+  extends:
+    - .zombienet-polkadot-common
+  needs:
+    - job: build-polkadot-zombienet-tests
+      artifacts: true
+  before_script:
+    - !reference [ ".zombienet-polkadot-common", "before_script" ]
+    - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
+    - export CUMULUS_IMAGE="${COL_IMAGE}"
+    - export X_INFRA_INSTANCE=spot # use spot by default
+  script:
+    # we want to use `--no-capture` in zombienet tests.
+    - unset NEXTEST_FAILURE_OUTPUT
+    - unset NEXTEST_SUCCESS_OUTPUT
+    - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- elastic_scaling::basic_3cores::basic_3cores_test
+
+zombienet-polkadot-functional-sync-backing:
+  extends:
+    - .zombienet-polkadot-common
+  needs:
+    - job: build-polkadot-zombienet-tests
+      artifacts: true
+  before_script:
+    - !reference [ ".zombienet-polkadot-common", "before_script" ]
+    - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
+    # Hardcoded to an old polkadot-parachain image, pre async backing.
+    - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:master-99623e62"
+    - export X_INFRA_INSTANCE=spot # use spot by default
+  script:
+    # we want to use `--no-capture` in zombienet tests.
+    - unset NEXTEST_FAILURE_OUTPUT
+    - unset NEXTEST_SUCCESS_OUTPUT
+    - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::sync_backing::sync_backing_test
+
+zombienet-polkadot-functional-async-backing-6-seconds-rate:
+  extends:
+    - .zombienet-polkadot-common
+  needs:
+    - job: build-polkadot-zombienet-tests
+      artifacts: true
+  before_script:
+    - !reference [ ".zombienet-polkadot-common", "before_script" ]
+    - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}"
+    - export X_INFRA_INSTANCE=spot # use spot by default
+  script:
+    # we want to use `--no-capture` in zombienet tests.
+    - unset NEXTEST_FAILURE_OUTPUT
+    - unset NEXTEST_SUCCESS_OUTPUT
+    - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::async_backing_6_seconds_rate::async_backing_6_seconds_rate_test
diff --git a/Cargo.lock b/Cargo.lock
index cc34514aeb9..0a22179eb3d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6213,7 +6213,7 @@ dependencies = [
  "regex",
  "syn 2.0.87",
  "termcolor",
- "toml 0.8.12",
+ "toml 0.8.19",
  "walkdir",
 ]
 
@@ -9777,29 +9777,6 @@ dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "libp2p"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e94495eb319a85b70a68b85e2389a95bb3555c71c49025b78c691a854a7e6464"
-dependencies = [
- "bytes",
- "either",
- "futures",
- "futures-timer",
- "getrandom",
- "instant",
- "libp2p-allow-block-list 0.2.0",
- "libp2p-connection-limits 0.2.1",
- "libp2p-core 0.40.1",
- "libp2p-identity",
- "libp2p-swarm 0.43.7",
- "multiaddr 0.18.1",
- "pin-project",
- "rw-stream-sink",
- "thiserror",
-]
-
 [[package]]
 name = "libp2p"
 version = "0.54.1"
@@ -9811,9 +9788,9 @@ dependencies = [
  "futures",
  "futures-timer",
  "getrandom",
- "libp2p-allow-block-list 0.4.0",
- "libp2p-connection-limits 0.4.0",
- "libp2p-core 0.42.0",
+ "libp2p-allow-block-list",
+ "libp2p-connection-limits",
+ "libp2p-core",
  "libp2p-dns",
  "libp2p-identify",
  "libp2p-identity",
@@ -9824,7 +9801,7 @@ dependencies = [
  "libp2p-ping",
  "libp2p-quic",
  "libp2p-request-response",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "libp2p-tcp",
  "libp2p-upnp",
  "libp2p-websocket",
@@ -9835,39 +9812,15 @@ dependencies = [
  "thiserror",
 ]
 
-[[package]]
-name = "libp2p-allow-block-list"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311"
-dependencies = [
- "libp2p-core 0.40.1",
- "libp2p-identity",
- "libp2p-swarm 0.43.7",
- "void",
-]
-
 [[package]]
 name = "libp2p-allow-block-list"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041"
 dependencies = [
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
- "void",
-]
-
-[[package]]
-name = "libp2p-connection-limits"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58"
-dependencies = [
- "libp2p-core 0.40.1",
- "libp2p-identity",
- "libp2p-swarm 0.43.7",
+ "libp2p-swarm",
  "void",
 ]
 
@@ -9877,37 +9830,9 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8"
 dependencies = [
- "libp2p-core 0.42.0",
- "libp2p-identity",
- "libp2p-swarm 0.45.1",
- "void",
-]
-
-[[package]]
-name = "libp2p-core"
-version = "0.40.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd44289ab25e4c9230d9246c475a22241e301b23e8f4061d3bdef304a1a99713"
-dependencies = [
- "either",
- "fnv",
- "futures",
- "futures-timer",
- "instant",
+ "libp2p-core",
  "libp2p-identity",
- "log",
- "multiaddr 0.18.1",
- "multihash 0.19.1",
- "multistream-select",
- "once_cell",
- "parking_lot 0.12.3",
- "pin-project",
- "quick-protobuf 0.8.1",
- "rand",
- "rw-stream-sink",
- "smallvec",
- "thiserror",
- "unsigned-varint 0.7.2",
+ "libp2p-swarm",
  "void",
 ]
 
@@ -9948,7 +9873,7 @@ dependencies = [
  "async-trait",
  "futures",
  "hickory-resolver",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "parking_lot 0.12.3",
  "smallvec",
@@ -9966,9 +9891,9 @@ dependencies = [
  "futures",
  "futures-bounded",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "lru 0.12.3",
  "quick-protobuf 0.8.1",
  "quick-protobuf-codec",
@@ -10010,9 +9935,9 @@ dependencies = [
  "futures",
  "futures-bounded",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "quick-protobuf 0.8.1",
  "quick-protobuf-codec",
  "rand",
@@ -10035,9 +9960,9 @@ dependencies = [
  "futures",
  "hickory-proto",
  "if-watch",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "rand",
  "smallvec",
  "socket2 0.5.7",
@@ -10053,12 +9978,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566"
 dependencies = [
  "futures",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identify",
  "libp2p-identity",
  "libp2p-kad",
  "libp2p-ping",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "pin-project",
  "prometheus-client",
  "web-time",
@@ -10074,7 +9999,7 @@ dependencies = [
  "bytes",
  "curve25519-dalek 4.1.3",
  "futures",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "multiaddr 0.18.1",
  "multihash 0.19.1",
@@ -10099,9 +10024,9 @@ dependencies = [
  "either",
  "futures",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "rand",
  "tracing",
  "void",
@@ -10118,7 +10043,7 @@ dependencies = [
  "futures",
  "futures-timer",
  "if-watch",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "libp2p-tls",
  "parking_lot 0.12.3",
@@ -10142,9 +10067,9 @@ dependencies = [
  "futures",
  "futures-bounded",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
- "libp2p-swarm 0.45.1",
+ "libp2p-swarm",
  "rand",
  "smallvec",
  "tracing",
@@ -10152,27 +10077,6 @@ dependencies = [
  "web-time",
 ]
 
-[[package]]
-name = "libp2p-swarm"
-version = "0.43.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "580189e0074af847df90e75ef54f3f30059aedda37ea5a1659e8b9fca05c0141"
-dependencies = [
- "either",
- "fnv",
- "futures",
- "futures-timer",
- "instant",
- "libp2p-core 0.40.1",
- "libp2p-identity",
- "log",
- "multistream-select",
- "once_cell",
- "rand",
- "smallvec",
- "void",
-]
-
 [[package]]
 name = "libp2p-swarm"
 version = "0.45.1"
@@ -10183,7 +10087,7 @@ dependencies = [
  "fnv",
  "futures",
  "futures-timer",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "libp2p-swarm-derive",
  "lru 0.12.3",
@@ -10219,7 +10123,7 @@ dependencies = [
  "futures-timer",
  "if-watch",
  "libc",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "socket2 0.5.7",
  "tokio",
@@ -10234,7 +10138,7 @@ checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847"
 dependencies = [
  "futures",
  "futures-rustls",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "rcgen 0.11.3",
  "ring 0.17.8",
@@ -10254,8 +10158,8 @@ dependencies = [
  "futures",
  "futures-timer",
  "igd-next",
- "libp2p-core 0.42.0",
- "libp2p-swarm 0.45.1",
+ "libp2p-core",
+ "libp2p-swarm",
  "tokio",
  "tracing",
  "void",
@@ -10270,7 +10174,7 @@ dependencies = [
  "either",
  "futures",
  "futures-rustls",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "libp2p-identity",
  "parking_lot 0.12.3",
  "pin-project-lite",
@@ -10290,7 +10194,7 @@ checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882"
 dependencies = [
  "either",
  "futures",
- "libp2p-core 0.42.0",
+ "libp2p-core",
  "thiserror",
  "tracing",
  "yamux 0.12.1",
@@ -11300,17 +11204,6 @@ dependencies = [
  "libc",
 ]
 
-[[package]]
-name = "nix"
-version = "0.27.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
-dependencies = [
- "bitflags 2.6.0",
- "cfg-if",
- "libc",
-]
-
 [[package]]
 name = "nix"
 version = "0.29.0"
@@ -13015,7 +12908,7 @@ dependencies = [
  "parity-wasm",
  "sp-runtime 31.0.1",
  "tempfile",
- "toml 0.8.12",
+ "toml 0.8.19",
  "twox-hash",
 ]
 
@@ -14936,7 +14829,7 @@ dependencies = [
  "polkavm-linker 0.18.0",
  "sp-core 28.0.0",
  "sp-io 30.0.0",
- "toml 0.8.12",
+ "toml 0.8.19",
 ]
 
 [[package]]
@@ -14951,7 +14844,7 @@ dependencies = [
  "polkavm-linker 0.10.0",
  "sp-runtime 39.0.2",
  "tempfile",
- "toml 0.8.12",
+ "toml 0.8.19",
 ]
 
 [[package]]
@@ -19854,6 +19747,7 @@ dependencies = [
  "env_logger 0.11.3",
  "log",
  "parity-scale-codec",
+ "polkadot-primitives 7.0.0",
  "serde",
  "serde_json",
  "substrate-build-script-utils",
@@ -19930,12 +19824,6 @@ dependencies = [
  "log",
 ]
 
-[[package]]
-name = "polkavm-common"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92c99f7eee94e7be43ba37eef65ad0ee8cbaf89b7c00001c3f6d2be985cb1817"
-
 [[package]]
 name = "polkavm-common"
 version = "0.9.0"
@@ -19965,15 +19853,6 @@ dependencies = [
  "polkavm-assembler 0.18.0",
 ]
 
-[[package]]
-name = "polkavm-derive"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79fa916f7962348bd1bb1a65a83401675e6fc86c51a0fdbcf92a3108e58e6125"
-dependencies = [
- "polkavm-derive-impl-macro 0.8.0",
-]
-
 [[package]]
 name = "polkavm-derive"
 version = "0.9.1"
@@ -20001,18 +19880,6 @@ dependencies = [
  "polkavm-derive-impl-macro 0.18.0",
 ]
 
-[[package]]
-name = "polkavm-derive-impl"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c10b2654a8a10a83c260bfb93e97b262cf0017494ab94a65d389e0eda6de6c9c"
-dependencies = [
- "polkavm-common 0.8.0",
- "proc-macro2 1.0.86",
- "quote 1.0.37",
- "syn 2.0.87",
-]
-
 [[package]]
 name = "polkavm-derive-impl"
 version = "0.9.0"
@@ -20049,16 +19916,6 @@ dependencies = [
  "syn 2.0.87",
 ]
 
-[[package]]
-name = "polkavm-derive-impl-macro"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15e85319a0d5129dc9f021c62607e0804f5fb777a05cdda44d750ac0732def66"
-dependencies = [
- "polkavm-derive-impl 0.8.0",
- "syn 2.0.87",
-]
-
 [[package]]
 name = "polkavm-derive-impl-macro"
 version = "0.9.0"
@@ -23042,7 +22899,7 @@ dependencies = [
  "futures",
  "futures-timer",
  "ip_network",
- "libp2p 0.54.1",
+ "libp2p",
  "linked_hash_set",
  "litep2p",
  "log",
@@ -23218,7 +23075,7 @@ dependencies = [
  "async-trait",
  "futures",
  "futures-timer",
- "libp2p 0.54.1",
+ "libp2p",
  "log",
  "parking_lot 0.12.3",
  "rand",
@@ -23675,7 +23532,7 @@ version = "15.0.0"
 dependencies = [
  "chrono",
  "futures",
- "libp2p 0.54.1",
+ "libp2p",
  "log",
  "parking_lot 0.12.3",
  "pin-project",
@@ -26177,53 +26034,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "sp-core"
-version = "31.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26d7a0fd8f16dcc3761198fc83be12872f823b37b749bc72a3a6a1f702509366"
-dependencies = [
- "array-bytes",
- "bitflags 1.3.2",
- "blake2 0.10.6",
- "bounded-collections",
- "bs58",
- "dyn-clonable",
- "ed25519-zebra 3.1.0",
- "futures",
- "hash-db",
- "hash256-std-hasher",
- "impl-serde 0.4.0",
- "itertools 0.10.5",
- "k256",
- "libsecp256k1",
- "log",
- "merlin",
- "parity-bip39",
- "parity-scale-codec",
- "parking_lot 0.12.3",
- "paste",
- "primitive-types 0.12.2",
- "rand",
- "scale-info",
- "schnorrkel 0.11.4",
- "secp256k1 0.28.2",
- "secrecy 0.8.0",
- "serde",
- "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-externalities 0.27.0",
- "sp-runtime-interface 26.0.0",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-storage 20.0.0",
- "ss58-registry",
- "substrate-bip39 0.5.0",
- "thiserror",
- "tracing",
- "w3f-bls",
- "zeroize",
-]
-
 [[package]]
 name = "sp-core"
 version = "32.0.0"
@@ -26564,18 +26374,6 @@ dependencies = [
  "sp-storage 19.0.0",
 ]
 
-[[package]]
-name = "sp-externalities"
-version = "0.27.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1d6a4572eadd4a63cff92509a210bf425501a0c5e76574b30a366ac77653787"
-dependencies = [
- "environmental",
- "parity-scale-codec",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-storage 20.0.0",
-]
-
 [[package]]
 name = "sp-externalities"
 version = "0.28.0"
@@ -27160,26 +26958,6 @@ dependencies = [
  "trybuild",
 ]
 
-[[package]]
-name = "sp-runtime-interface"
-version = "26.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e48a675ea4858333d4d755899ed5ed780174aa34fec15953428d516af5452295"
-dependencies = [
- "bytes",
- "impl-trait-for-tuples",
- "parity-scale-codec",
- "polkavm-derive 0.8.0",
- "primitive-types 0.12.2",
- "sp-externalities 0.27.0",
- "sp-runtime-interface-proc-macro 18.0.0",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-storage 20.0.0",
- "sp-tracing 16.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-wasm-interface 20.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "static_assertions",
-]
-
 [[package]]
 name = "sp-runtime-interface"
 version = "27.0.0"
@@ -27537,20 +27315,6 @@ dependencies = [
  "sp-debug-derive 14.0.0",
 ]
 
-[[package]]
-name = "sp-storage"
-version = "20.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8dba5791cb3978e95daf99dad919ecb3ec35565604e88cd38d805d9d4981e8bd"
-dependencies = [
- "impl-serde 0.4.0",
- "parity-scale-codec",
- "ref-cast",
- "serde",
- "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "sp-storage"
 version = "21.0.0"
@@ -27622,19 +27386,6 @@ dependencies = [
  "tracing-subscriber 0.3.18",
 ]
 
-[[package]]
-name = "sp-tracing"
-version = "16.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0351810b9d074df71c4514c5228ed05c250607cba131c1c9d1526760ab69c05c"
-dependencies = [
- "parity-scale-codec",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tracing",
- "tracing-core",
- "tracing-subscriber 0.2.25",
-]
-
 [[package]]
 name = "sp-tracing"
 version = "17.0.1"
@@ -27891,20 +27642,6 @@ dependencies = [
  "wasmtime",
 ]
 
-[[package]]
-name = "sp-wasm-interface"
-version = "20.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ef97172c42eb4c6c26506f325f48463e9bc29b2034a587f1b9e48c751229bee"
-dependencies = [
- "anyhow",
- "impl-trait-for-tuples",
- "log",
- "parity-scale-codec",
- "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmtime",
-]
-
 [[package]]
 name = "sp-wasm-interface"
 version = "21.0.1"
@@ -28444,19 +28181,6 @@ dependencies = [
  "zeroize",
 ]
 
-[[package]]
-name = "substrate-bip39"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2b564c293e6194e8b222e52436bcb99f60de72043c7f845cf6c4406db4df121"
-dependencies = [
- "hmac 0.12.1",
- "pbkdf2",
- "schnorrkel 0.11.4",
- "sha2 0.10.8",
- "zeroize",
-]
-
 [[package]]
 name = "substrate-bip39"
 version = "0.6.0"
@@ -28797,7 +28521,7 @@ dependencies = [
  "sp-version 29.0.0",
  "strum 0.26.3",
  "tempfile",
- "toml 0.8.12",
+ "toml 0.8.19",
  "walkdir",
  "wasm-opt",
 ]
@@ -28818,7 +28542,7 @@ dependencies = [
  "sp-maybe-compressed-blob 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "strum 0.26.3",
  "tempfile",
- "toml 0.8.12",
+ "toml 0.8.19",
  "walkdir",
  "wasm-opt",
 ]
@@ -29815,33 +29539,21 @@ dependencies = [
 
 [[package]]
 name = "toml"
-version = "0.7.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
-dependencies = [
- "serde",
- "serde_spanned",
- "toml_datetime",
- "toml_edit 0.19.15",
-]
-
-[[package]]
-name = "toml"
-version = "0.8.12"
+version = "0.8.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3"
+checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e"
 dependencies = [
  "serde",
  "serde_spanned",
  "toml_datetime",
- "toml_edit 0.22.12",
+ "toml_edit 0.22.22",
 ]
 
 [[package]]
 name = "toml_datetime"
-version = "0.6.5"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
+checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
 dependencies = [
  "serde",
 ]
@@ -29853,8 +29565,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
 dependencies = [
  "indexmap 2.7.0",
- "serde",
- "serde_spanned",
  "toml_datetime",
  "winnow 0.5.15",
 ]
@@ -29872,9 +29582,9 @@ dependencies = [
 
 [[package]]
 name = "toml_edit"
-version = "0.22.12"
+version = "0.22.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef"
+checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
 dependencies = [
  "indexmap 2.7.0",
  "serde",
@@ -32076,9 +31786,9 @@ dependencies = [
 
 [[package]]
 name = "zombienet-configuration"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d716b3ff8112d98ced15f53b0c72454f8cde533fe2b68bb04379228961efbd80"
+checksum = "5ced2fca1322821431f03d06dcf2ea74d3a7369760b6c587b372de6eada3ce43"
 dependencies = [
  "anyhow",
  "lazy_static",
@@ -32089,23 +31799,23 @@ dependencies = [
  "serde_json",
  "thiserror",
  "tokio",
- "toml 0.7.8",
+ "toml 0.8.19",
  "url",
  "zombienet-support",
 ]
 
 [[package]]
 name = "zombienet-orchestrator"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4098a7d33b729b59e32c41a87aa4d484bd1b8771a059bbd4edfb4d430b3b2d74"
+checksum = "86ecd17133c3129547b6472591b5e58d4aee1fc63c965a3418fd56d33a8a4e82"
 dependencies = [
  "anyhow",
  "async-trait",
  "futures",
  "glob-match",
  "hex",
- "libp2p 0.52.4",
+ "libp2p",
  "libsecp256k1",
  "multiaddr 0.18.1",
  "rand",
@@ -32114,7 +31824,7 @@ dependencies = [
  "serde",
  "serde_json",
  "sha2 0.10.8",
- "sp-core 31.0.0",
+ "sp-core 34.0.0",
  "subxt",
  "subxt-signer",
  "thiserror",
@@ -32129,9 +31839,9 @@ dependencies = [
 
 [[package]]
 name = "zombienet-prom-metrics-parser"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "961e30be45b34f6ebeabf29ee2f47b0cd191ea62e40c064752572207509a6f5c"
+checksum = "23702db0819a050c8a0130a769b105695137020a64207b4597aa021f06924552"
 dependencies = [
  "pest",
  "pest_derive",
@@ -32140,9 +31850,9 @@ dependencies = [
 
 [[package]]
 name = "zombienet-provider"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab0f7f01780b7c99a6c40539d195d979f234305f32808d547438b50829d44262"
+checksum = "83e903843c62cd811e7730ccc618dcd14444d20e8aadfcd7d7561c7b47d8f984"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -32151,7 +31861,7 @@ dependencies = [
  "hex",
  "k8s-openapi",
  "kube",
- "nix 0.27.1",
+ "nix 0.29.0",
  "regex",
  "reqwest 0.11.27",
  "serde",
@@ -32171,9 +31881,9 @@ dependencies = [
 
 [[package]]
 name = "zombienet-sdk"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99a3c5f2d657235b3ab7dc384677e63cde21983029e99106766ecd49e9f8d7f3"
+checksum = "e457b12c8fdc7003c12dd56855da09812ac11dd232e4ec01acccb2899fe05e44"
 dependencies = [
  "async-trait",
  "futures",
@@ -32189,14 +31899,14 @@ dependencies = [
 
 [[package]]
 name = "zombienet-support"
-version = "0.2.19"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "296f887ea88e07edd771f8e1d0dec5297a58b422f4b884a6292a21ebe03277cb"
+checksum = "43547d65b19a92cf0ee44380239d82ef345e7d26f7b04b9e0ecf48496af6346b"
 dependencies = [
  "anyhow",
  "async-trait",
  "futures",
- "nix 0.27.1",
+ "nix 0.29.0",
  "rand",
  "regex",
  "reqwest 0.11.27",
diff --git a/Cargo.toml b/Cargo.toml
index c917a8a8fea..c30a9949e85 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1391,7 +1391,7 @@ xcm-procedural = { path = "polkadot/xcm/procedural", default-features = false }
 xcm-runtime-apis = { path = "polkadot/xcm/xcm-runtime-apis", default-features = false }
 xcm-simulator = { path = "polkadot/xcm/xcm-simulator", default-features = false }
 zeroize = { version = "1.7.0", default-features = false }
-zombienet-sdk = { version = "0.2.19" }
+zombienet-sdk = { version = "0.2.20" }
 zstd = { version = "0.12.4", default-features = false }
 
 [profile.release]
diff --git a/polkadot/zombienet-sdk-tests/Cargo.toml b/polkadot/zombienet-sdk-tests/Cargo.toml
index 120857c9a42..ba7517ddce6 100644
--- a/polkadot/zombienet-sdk-tests/Cargo.toml
+++ b/polkadot/zombienet-sdk-tests/Cargo.toml
@@ -12,6 +12,7 @@ anyhow = { workspace = true }
 codec = { workspace = true, features = ["derive"] }
 env_logger = { workspace = true }
 log = { workspace = true }
+polkadot-primitives = { workspace = true, default-features = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
 subxt = { workspace = true, features = ["substrate-compat"] }
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs
new file mode 100644
index 00000000000..42aa83d9da7
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs
@@ -0,0 +1,135 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Test that a parachain that uses a basic collator (like adder-collator) with elastic scaling
+// can achieve full throughput of 3 candidates per block.
+
+use anyhow::anyhow;
+
+use crate::helpers::{
+	assert_para_throughput, rococo,
+	rococo::runtime_types::{
+		pallet_broker::coretime_interface::CoreAssignment,
+		polkadot_runtime_parachains::assigner_coretime::PartsOf57600,
+	},
+};
+use polkadot_primitives::Id as ParaId;
+use serde_json::json;
+use subxt::{OnlineClient, PolkadotConfig};
+use subxt_signer::sr25519::dev;
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn basic_3cores_test() -> Result<(), anyhow::Error> {
+	let _ = env_logger::try_init_from_env(
+		env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+	);
+
+	let images = zombienet_sdk::environment::get_images_from_env();
+
+	let config = NetworkConfigBuilder::new()
+		.with_relaychain(|r| {
+			let r = r
+				.with_chain("rococo-local")
+				.with_default_command("polkadot")
+				.with_default_image(images.polkadot.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_genesis_overrides(json!({
+					"configuration": {
+						"config": {
+							"scheduler_params": {
+								"num_cores": 2,
+								"max_validators_per_core": 1
+							},
+							"async_backing_params": {
+								"max_candidate_depth": 6,
+								"allowed_ancestry_len": 2
+							}
+						}
+					}
+				}))
+				// Have to set a `with_node` outside of the loop below, so that `r` has the right
+				// type.
+				.with_node(|node| node.with_name("validator-0"));
+
+			(1..4).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+		})
+		.with_parachain(|p| {
+			p.with_id(2000)
+				.with_default_command("adder-collator")
+				.cumulus_based(false)
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_collator(|n| n.with_name("adder-2000"))
+		})
+		.with_parachain(|p| {
+			p.with_id(2001)
+				.with_default_command("adder-collator")
+				.cumulus_based(false)
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_collator(|n| n.with_name("adder-2001"))
+		})
+		.build()
+		.map_err(|e| {
+			let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+			anyhow!("config errs: {errs}")
+		})?;
+
+	let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+	let network = spawn_fn(config).await?;
+
+	let relay_node = network.get_node("validator-0")?;
+
+	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+	let alice = dev::alice();
+
+	// Assign two extra cores to adder-2000.
+	relay_client
+		.tx()
+		.sign_and_submit_then_watch_default(
+			&rococo::tx()
+				.sudo()
+				.sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Utility(
+					rococo::runtime_types::pallet_utility::pallet::Call::batch {
+						calls: vec![
+							rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
+								rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
+									core: 0,
+									begin: 0,
+									assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))],
+									end_hint: None
+								}
+							),
+							rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
+								rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
+									core: 1,
+									begin: 0,
+									assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))],
+									end_hint: None
+								}
+							),
+						],
+					},
+				)),
+			&alice,
+		)
+		.await?
+		.wait_for_finalized_success()
+		.await?;
+
+	log::info!("2 more cores assigned to adder-2000");
+
+	assert_para_throughput(
+		&relay_client,
+		15,
+		[(ParaId::from(2000), 40..46), (ParaId::from(2001), 12..16)]
+			.into_iter()
+			.collect(),
+	)
+	.await?;
+
+	log::info!("Test finished successfully");
+
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
new file mode 100644
index 00000000000..f83400d2b22
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
@@ -0,0 +1,133 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Test that a paraid that doesn't use elastic scaling which acquired multiple cores does not brick
+// itself if ElasticScalingMVP feature is enabled in genesis.
+
+use anyhow::anyhow;
+
+use crate::helpers::{
+	assert_finalized_block_height, assert_para_throughput, rococo,
+	rococo::runtime_types::{
+		pallet_broker::coretime_interface::CoreAssignment,
+		polkadot_runtime_parachains::assigner_coretime::PartsOf57600,
+	},
+};
+use polkadot_primitives::{CoreIndex, Id as ParaId};
+use serde_json::json;
+use std::collections::{BTreeMap, VecDeque};
+use subxt::{OnlineClient, PolkadotConfig};
+use subxt_signer::sr25519::dev;
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> {
+	let _ = env_logger::try_init_from_env(
+		env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+	);
+
+	let images = zombienet_sdk::environment::get_images_from_env();
+
+	let config = NetworkConfigBuilder::new()
+		.with_relaychain(|r| {
+			let r = r
+				.with_chain("rococo-local")
+				.with_default_command("polkadot")
+				.with_default_image(images.polkadot.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_genesis_overrides(json!({
+					"configuration": {
+						"config": {
+							"scheduler_params": {
+								"num_cores": 1,
+								"max_validators_per_core": 2
+							},
+							"async_backing_params": {
+								"max_candidate_depth": 6,
+								"allowed_ancestry_len": 2
+							}
+						}
+					}
+				}))
+				// Have to set a `with_node` outside of the loop below, so that `r` has the right
+				// type.
+				.with_node(|node| node.with_name("validator-0"));
+
+			(1..4).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+		})
+		.with_parachain(|p| {
+			// Use rococo-parachain default, which has 6 second slot time. Also, don't use
+			// slot-based collator.
+			p.with_id(2000)
+				.with_default_command("polkadot-parachain")
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug,aura=debug").into()])
+				.with_collator(|n| n.with_name("collator-2000"))
+		})
+		.build()
+		.map_err(|e| {
+			let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+			anyhow!("config errs: {errs}")
+		})?;
+
+	let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+	let network = spawn_fn(config).await?;
+
+	let relay_node = network.get_node("validator-0")?;
+	let para_node = network.get_node("collator-2000")?;
+
+	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+	let alice = dev::alice();
+
+	relay_client
+		.tx()
+		.sign_and_submit_then_watch_default(
+			&rococo::tx()
+				.sudo()
+				.sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime(
+                    rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core {
+                        core: 0,
+                        begin: 0,
+                        assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))],
+                        end_hint: None
+                    }
+                )),
+			&alice,
+		)
+		.await?
+		.wait_for_finalized_success()
+		.await?;
+
+	log::info!("1 more core assigned to the parachain");
+
+	let para_id = ParaId::from(2000);
+	// Expect the parachain to be making normal progress, 1 candidate backed per relay chain block.
+	assert_para_throughput(&relay_client, 15, [(para_id, 13..16)].into_iter().collect()).await?;
+
+	let para_client = para_node.wait_client().await?;
+	// Assert the parachain finalized block height is also on par with the number of backed
+	// candidates.
+	assert_finalized_block_height(&para_client, 12..16).await?;
+
+	// Sanity check that indeed the parachain has two assigned cores.
+	let cq = relay_client
+		.runtime_api()
+		.at_latest()
+		.await?
+		.call_raw::<BTreeMap<CoreIndex, VecDeque<ParaId>>>("ParachainHost_claim_queue", None)
+		.await?;
+
+	assert_eq!(
+		cq,
+		[
+			(CoreIndex(0), [para_id, para_id].into_iter().collect()),
+			(CoreIndex(1), [para_id, para_id].into_iter().collect()),
+		]
+		.into_iter()
+		.collect()
+	);
+
+	log::info!("Test finished successfully");
+
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs
index bb296a419df..9cfd5db5a09 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/mod.rs
@@ -1,8 +1,6 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
-#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")]
-pub mod rococo {}
-
-mod helpers;
+mod basic_3cores;
+mod doesnt_break_parachains;
 mod slot_based_3cores;
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
index 41ec1250ecc..aa9f4132013 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
@@ -6,14 +6,14 @@
 
 use anyhow::anyhow;
 
-use super::{
-	helpers::assert_para_throughput,
-	rococo,
+use crate::helpers::{
+	assert_finalized_block_height, assert_para_throughput, rococo,
 	rococo::runtime_types::{
 		pallet_broker::coretime_interface::CoreAssignment,
 		polkadot_runtime_parachains::assigner_coretime::PartsOf57600,
 	},
 };
+use polkadot_primitives::Id as ParaId;
 use serde_json::json;
 use subxt::{OnlineClient, PolkadotConfig};
 use subxt_signer::sr25519::dev;
@@ -63,7 +63,6 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> {
 				.with_default_command("test-parachain")
 				.with_default_image(images.cumulus.as_str())
 				.with_chain("elastic-scaling-mvp")
-				.with_default_args(vec![("--experimental-use-slot-based").into()])
 				.with_default_args(vec![
 					("--experimental-use-slot-based").into(),
 					("-lparachain=debug,aura=debug").into(),
@@ -93,6 +92,8 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> {
 	let network = spawn_fn(config).await?;
 
 	let relay_node = network.get_node("validator-0")?;
+	let para_node_elastic = network.get_node("collator-elastic")?;
+	let para_node_elastic_mvp = network.get_node("collator-elastic-mvp")?;
 
 	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
 	let alice = dev::alice();
@@ -156,10 +157,17 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> {
 	assert_para_throughput(
 		&relay_client,
 		15,
-		[(2100, 39..46), (2200, 39..46)].into_iter().collect(),
+		[(ParaId::from(2100), 39..46), (ParaId::from(2200), 39..46)]
+			.into_iter()
+			.collect(),
 	)
 	.await?;
 
+	// Assert the parachain finalized block height is also on par with the number of backed
+	// candidates.
+	assert_finalized_block_height(&para_node_elastic.wait_client().await?, 36..46).await?;
+	assert_finalized_block_height(&para_node_elastic_mvp.wait_client().await?, 36..46).await?;
+
 	log::info!("Test finished successfully");
 
 	Ok(())
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs
new file mode 100644
index 00000000000..14f86eb130f
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs
@@ -0,0 +1,95 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Test we are producing 6-second parachain blocks with async backing enabled.
+
+use anyhow::anyhow;
+
+use crate::helpers::{assert_finalized_block_height, assert_para_throughput};
+use polkadot_primitives::Id as ParaId;
+use serde_json::json;
+use subxt::{OnlineClient, PolkadotConfig};
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn async_backing_6_seconds_rate_test() -> Result<(), anyhow::Error> {
+	let _ = env_logger::try_init_from_env(
+		env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+	);
+
+	let images = zombienet_sdk::environment::get_images_from_env();
+
+	let config = NetworkConfigBuilder::new()
+		.with_relaychain(|r| {
+			let r = r
+				.with_chain("rococo-local")
+				.with_default_command("polkadot")
+				.with_default_image(images.polkadot.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_genesis_overrides(json!({
+					"configuration": {
+						"config": {
+							"scheduler_params": {
+								"group_rotation_frequency": 4,
+								"lookahead": 2,
+								"max_candidate_depth": 3,
+								"allowed_ancestry_len": 2,
+							},
+						}
+					}
+				}))
+				.with_node(|node| node.with_name("validator-0"));
+
+			(1..12)
+				.fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+		})
+		.with_parachain(|p| {
+			p.with_id(2000)
+				.with_default_command("adder-collator")
+				.with_default_image(
+					std::env::var("COL_IMAGE")
+						.unwrap_or("docker.io/paritypr/colander:latest".to_string())
+						.as_str(),
+				)
+				.cumulus_based(false)
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_collator(|n| n.with_name("collator-adder-2000"))
+		})
+		.with_parachain(|p| {
+			p.with_id(2001)
+				.with_default_command("polkadot-parachain")
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug,aura=debug").into()])
+				.with_collator(|n| n.with_name("collator-2001"))
+		})
+		.build()
+		.map_err(|e| {
+			let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+			anyhow!("config errs: {errs}")
+		})?;
+
+	let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+	let network = spawn_fn(config).await?;
+
+	let relay_node = network.get_node("validator-0")?;
+	let para_node_2001 = network.get_node("collator-2001")?;
+
+	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+
+	assert_para_throughput(
+		&relay_client,
+		15,
+		[(ParaId::from(2000), 11..16), (ParaId::from(2001), 11..16)]
+			.into_iter()
+			.collect(),
+	)
+	.await?;
+
+	// Assert the parachain finalized block height is also on par with the number of backed
+	// candidates. We can only do this for the collator based on cumulus.
+	assert_finalized_block_height(&para_node_2001.wait_client().await?, 10..16).await?;
+
+	log::info!("Test finished successfully");
+
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/mod.rs b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs
new file mode 100644
index 00000000000..ecdab38e1d2
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs
@@ -0,0 +1,5 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+mod async_backing_6_seconds_rate;
+mod sync_backing;
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs
new file mode 100644
index 00000000000..6da45e28449
--- /dev/null
+++ b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs
@@ -0,0 +1,74 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Test we are producing 12-second parachain blocks if using an old collator, pre async-backing.
+
+use anyhow::anyhow;
+
+use crate::helpers::{assert_finalized_block_height, assert_para_throughput};
+use polkadot_primitives::Id as ParaId;
+use serde_json::json;
+use subxt::{OnlineClient, PolkadotConfig};
+use zombienet_sdk::NetworkConfigBuilder;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn sync_backing_test() -> Result<(), anyhow::Error> {
+	let _ = env_logger::try_init_from_env(
+		env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
+	);
+
+	let images = zombienet_sdk::environment::get_images_from_env();
+
+	let config = NetworkConfigBuilder::new()
+		.with_relaychain(|r| {
+			let r = r
+				.with_chain("rococo-local")
+				.with_default_command("polkadot")
+				.with_default_image(images.polkadot.as_str())
+				.with_default_args(vec![("-lparachain=debug").into()])
+				.with_genesis_overrides(json!({
+					"configuration": {
+						"config": {
+							"scheduler_params": {
+								"group_rotation_frequency": 4,
+							},
+						}
+					}
+				}))
+				.with_node(|node| node.with_name("validator-0"));
+
+			(1..5).fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}"))))
+		})
+		.with_parachain(|p| {
+			p.with_id(2000)
+				.with_default_command("polkadot-parachain")
+				// This must be a very old polkadot-parachain image, pre async backing
+				.with_default_image(images.cumulus.as_str())
+				.with_default_args(vec![("-lparachain=debug,aura=debug").into()])
+				.with_collator(|n| n.with_name("collator-2000"))
+		})
+		.build()
+		.map_err(|e| {
+			let errs = e.into_iter().map(|e| e.to_string()).collect::<Vec<_>>().join(" ");
+			anyhow!("config errs: {errs}")
+		})?;
+
+	let spawn_fn = zombienet_sdk::environment::get_spawn_fn();
+	let network = spawn_fn(config).await?;
+
+	let relay_node = network.get_node("validator-0")?;
+	let para_node = network.get_node("collator-2000")?;
+
+	let relay_client: OnlineClient<PolkadotConfig> = relay_node.wait_client().await?;
+
+	assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 5..9)].into_iter().collect())
+		.await?;
+
+	// Assert the parachain finalized block height is also on par with the number of backed
+	// candidates.
+	assert_finalized_block_height(&para_node.wait_client().await?, 5..9).await?;
+
+	log::info!("Test finished successfully");
+
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs b/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs
similarity index 65%
rename from polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs
rename to polkadot/zombienet-sdk-tests/tests/helpers/mod.rs
index 7d4ad4a1dd8..470345ca4d6 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/helpers.rs
+++ b/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs
@@ -1,19 +1,22 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
-use super::rococo;
+use polkadot_primitives::Id as ParaId;
 use std::{collections::HashMap, ops::Range};
 use subxt::{OnlineClient, PolkadotConfig};
 
+#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")]
+pub mod rococo {}
+
 // Helper function for asserting the throughput of parachains (total number of backed candidates in
 // a window of relay chain blocks), after the first session change.
 pub async fn assert_para_throughput(
 	relay_client: &OnlineClient<PolkadotConfig>,
 	stop_at: u32,
-	expected_candidate_ranges: HashMap<u32, Range<u32>>,
+	expected_candidate_ranges: HashMap<ParaId, Range<u32>>,
 ) -> Result<(), anyhow::Error> {
 	let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?;
-	let mut candidate_count: HashMap<u32, u32> = HashMap::new();
+	let mut candidate_count: HashMap<ParaId, u32> = HashMap::new();
 	let mut current_block_count = 0;
 	let mut had_first_session_change = false;
 
@@ -31,7 +34,7 @@ pub async fn assert_para_throughput(
 			current_block_count += 1;
 
 			for event in events.find::<rococo::para_inclusion::events::CandidateBacked>() {
-				*(candidate_count.entry(event?.0.descriptor.para_id.0).or_default()) += 1;
+				*(candidate_count.entry(event?.0.descriptor.para_id.0.into()).or_default()) += 1;
 			}
 		}
 
@@ -58,3 +61,21 @@ pub async fn assert_para_throughput(
 
 	Ok(())
 }
+
+// Helper function for retrieving the latest finalized block height and asserting it's within a
+// range.
+pub async fn assert_finalized_block_height(
+	client: &OnlineClient<PolkadotConfig>,
+	expected_range: Range<u32>,
+) -> Result<(), anyhow::Error> {
+	if let Some(block) = client.blocks().subscribe_finalized().await?.next().await {
+		let height = block?.number();
+		log::info!("Finalized block number {height}");
+
+		assert!(
+			expected_range.contains(&height),
+			"Finalized block number {height} not within range {expected_range:?}"
+		);
+	}
+	Ok(())
+}
diff --git a/polkadot/zombienet-sdk-tests/tests/lib.rs b/polkadot/zombienet-sdk-tests/tests/lib.rs
index 977e0f90b1c..9feb9775e45 100644
--- a/polkadot/zombienet-sdk-tests/tests/lib.rs
+++ b/polkadot/zombienet-sdk-tests/tests/lib.rs
@@ -1,7 +1,12 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
 // SPDX-License-Identifier: Apache-2.0
 
+#[cfg(feature = "zombie-metadata")]
+mod helpers;
+
 #[cfg(feature = "zombie-metadata")]
 mod elastic_scaling;
 #[cfg(feature = "zombie-metadata")]
+mod functional;
+#[cfg(feature = "zombie-metadata")]
 mod smoke;
diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs
index 2da2436a111..59a71a83e01 100644
--- a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs
+++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs
@@ -10,21 +10,24 @@
 //! normal parachain runtime WILL mess things up.
 
 use anyhow::anyhow;
-#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")]
-pub mod rococo {}
 
 #[subxt::subxt(runtime_metadata_path = "metadata-files/coretime-rococo-local.scale")]
 mod coretime_rococo {}
 
-use rococo::runtime_types::{
-	staging_xcm::v4::{
-		asset::{Asset, AssetId, Assets, Fungibility},
-		junction::Junction,
-		junctions::Junctions,
-		location::Location,
+use crate::helpers::rococo::{
+	self as rococo_api,
+	runtime_types::{
+		polkadot_parachain_primitives::primitives,
+		staging_xcm::v4::{
+			asset::{Asset, AssetId, Assets, Fungibility},
+			junction::Junction,
+			junctions::Junctions,
+			location::Location,
+		},
+		xcm::{VersionedAssets, VersionedLocation},
 	},
-	xcm::{VersionedAssets, VersionedLocation},
 };
+
 use serde_json::json;
 use std::{fmt::Display, sync::Arc};
 use subxt::{events::StaticEvent, utils::AccountId32, OnlineClient, PolkadotConfig};
@@ -41,8 +44,6 @@ use coretime_rococo::{
 	},
 };
 
-use rococo::{self as rococo_api, runtime_types::polkadot_parachain_primitives::primitives};
-
 type CoretimeRuntimeCall = coretime_api::runtime_types::coretime_rococo_runtime::RuntimeCall;
 type CoretimeUtilityCall = coretime_api::runtime_types::pallet_utility::pallet::Call;
 type CoretimeBrokerCall = coretime_api::runtime_types::pallet_broker::pallet::Call;
diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml
deleted file mode 100644
index 611978a33a5..00000000000
--- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.toml
+++ /dev/null
@@ -1,49 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 6
-  allowed_ancestry_len = 2
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
-  max_validators_per_core = 1
-  num_cores = 3
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params]
-  max_approval_coalesce_count = 5
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-  [relaychain.default_resources]
-  limits = { memory = "4G", cpu = "3" }
-  requests = { memory = "4G", cpu = "3" }
-
-  [[relaychain.node_groups]]
-  name = "elastic-validator"
-  count = 5
-  args = [ "-lparachain=debug,parachain::candidate-backing=trace,parachain::provisioner=trace,parachain::prospective-parachains=trace,runtime=debug"]
-
-{% for id in range(2000,2002) %}
-[[parachains]]
-id = {{id}}
-addToGenesis = true
-    [parachains.default_resources]
-    limits = { memory = "4G", cpu = "3" }
-    requests = { memory = "4G", cpu = "3" }
-
-    [parachains.collator]
-    name = "some-parachain"
-    image = "{{COL_IMAGE}}"
-    command = "adder-collator"
-    args = ["-lparachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug"]
-
-{% endfor %}
-
-# This represents the layout of the adder collator block header.
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
diff --git a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl b/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl
deleted file mode 100644
index d47ef8f415f..00000000000
--- a/polkadot/zombienet_tests/elastic_scaling/0001-basic-3cores-6s-blocks.zndsl
+++ /dev/null
@@ -1,28 +0,0 @@
-Description: Test with adder collator using 3 cores and async backing
-Network: ./0001-basic-3cores-6s-blocks.toml
-Creds: config
-
-# Check authority status.
-elastic-validator-0: reports node_roles is 4
-elastic-validator-1: reports node_roles is 4
-elastic-validator-2: reports node_roles is 4
-elastic-validator-3: reports node_roles is 4
-elastic-validator-4: reports node_roles is 4
-
-
-# Register 2 extra cores to this some-parachain.
-elastic-validator-0: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds
-elastic-validator-0: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds
-
-# Wait for 20 relay chain blocks 
-elastic-validator-0: reports substrate_block_height{status="best"} is at least 20 within 600 seconds
-
-# Non elastic parachain should progress normally
-some-parachain-1: count of log lines containing "Parachain velocity: 1" is at least 5 within 20 seconds
-# Sanity
-some-parachain-1: count of log lines containing "Parachain velocity: 2" is 0
-
-# Parachain should progress 3 blocks per relay chain block ideally, however CI might not be
-# the most performant environment so we'd just use a lower bound of 2 blocks per RCB
-elastic-validator-0: parachain 2000 block height is at least 20 within 200 seconds
-
diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml
deleted file mode 100644
index 046d707cc1e..00000000000
--- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.toml
+++ /dev/null
@@ -1,40 +0,0 @@
-[settings]
-timeout = 1000
-bootnode = true
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  needed_approvals = 4
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
-  max_validators_per_core = 2
-  num_cores = 2
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-default_command = "polkadot"
-
-[relaychain.default_resources]
-limits = { memory = "4G", cpu = "2" }
-requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.nodes]]
-  name = "alice"
-  validator = "true"
-
-  [[relaychain.node_groups]]
-  name = "validator"
-  count = 3
-  args = [ "-lparachain=debug,runtime=debug"]
-
-[[parachains]]
-id = 2000
-default_command = "polkadot-parachain"
-add_to_genesis = false
-register_para = true
-onboard_as_parachain = false
-
-  [parachains.collator]
-  name = "collator2000"
-  command = "polkadot-parachain"
-  args = [ "-lparachain=debug", "--experimental-use-slot-based" ]
diff --git a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl b/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl
deleted file mode 100644
index 0cfc29f532d..00000000000
--- a/polkadot/zombienet_tests/elastic_scaling/0002-elastic-scaling-doesnt-break-parachains.zndsl
+++ /dev/null
@@ -1,20 +0,0 @@
-Description: Test that a paraid acquiring multiple cores does not brick itself if ElasticScalingMVP feature is enabled in genesis
-Network: ./0002-elastic-scaling-doesnt-break-parachains.toml
-Creds: config
-
-# Check authority status.
-validator: reports node_roles is 4
-
-validator: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds
-
-# Ensure parachain was able to make progress.
-validator: parachain 2000 block height is at least 10 within 200 seconds
-
-# Register the second core assigned to this parachain.
-alice: js-script ./assign-core.js with "0,2000,57600" return is 0 within 600 seconds
-alice: js-script ./assign-core.js with "1,2000,57600" return is 0 within 600 seconds
-
-validator: reports substrate_block_height{status="finalized"} is at least 35 within 100 seconds
-
-# Ensure parachain is now making progress.
-validator: parachain 2000 block height is at least 30 within 200 seconds
diff --git a/polkadot/zombienet_tests/elastic_scaling/assign-core.js b/polkadot/zombienet_tests/elastic_scaling/assign-core.js
deleted file mode 120000
index eeb6402c06f..00000000000
--- a/polkadot/zombienet_tests/elastic_scaling/assign-core.js
+++ /dev/null
@@ -1 +0,0 @@
-../assign-core.js
\ No newline at end of file
diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml
deleted file mode 100644
index b776622fdce..00000000000
--- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.toml
+++ /dev/null
@@ -1,54 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config]
-  needed_approvals = 4
-  relay_vrf_modulo_samples = 6
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 3
-  allowed_ancestry_len = 2
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
-  lookahead = 2
-  group_rotation_frequency = 4
-
-
-[relaychain.default_resources]
-limits = { memory = "4G", cpu = "2" }
-requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.node_groups]]
-  name = "alice"
-  args = [ "-lparachain=debug" ]
-  count = 12
-
-[[parachains]]
-id = 2000
-addToGenesis = true
-genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=1"
-
-  [parachains.collator]
-  name = "collator01"
-  image = "{{COL_IMAGE}}"
-  command = "undying-collator"
-  args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=1", "--parachain-id=2000"]
-
-[[parachains]]
-id = 2001
-cumulus_based = true
-
-  [parachains.collator]
-  name = "collator02"
-  image = "{{CUMULUS_IMAGE}}"
-  command = "polkadot-parachain"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
\ No newline at end of file
diff --git a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl b/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl
deleted file mode 100644
index 0d01af82833..00000000000
--- a/polkadot/zombienet_tests/functional/0011-async-backing-6-seconds-rate.zndsl
+++ /dev/null
@@ -1,20 +0,0 @@
-Description: Test we are producing blocks at 6 seconds clip
-Network: ./0011-async-backing-6-seconds-rate.toml
-Creds: config
-
-# Check authority status.
-alice: reports node_roles is 4
-
-# Ensure parachains are registered.
-alice: parachain 2000 is registered within 60 seconds
-alice: parachain 2001 is registered within 60 seconds
-
-# Ensure parachains made progress.
-alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds
-
-# This parachains should produce blocks at 6s clip, let's assume an 8s rate, allowing for
-# some slots to be missed on slower machines
-alice: parachain 2000 block height is at least 30 within 240 seconds
-# This should already have produced the needed blocks
-alice: parachain 2001 block height is at least 30 within 6 seconds
-
diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.toml b/polkadot/zombienet_tests/functional/0017-sync-backing.toml
deleted file mode 100644
index 2550054c8da..00000000000
--- a/polkadot/zombienet_tests/functional/0017-sync-backing.toml
+++ /dev/null
@@ -1,48 +0,0 @@
-[settings]
-timeout = 1000
-
-[relaychain]
-default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
-chain = "rococo-local"
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 0
-  allowed_ancestry_len = 0
-
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
-  lookahead = 2
-  group_rotation_frequency = 4
-
-[relaychain.default_resources]
-limits = { memory = "4G", cpu = "2" }
-requests = { memory = "2G", cpu = "1" }
-
-  [[relaychain.node_groups]]
-  name = "alice"
-  args = [ "-lparachain=debug" ]
-  count = 10
-
-[[parachains]]
-id = 2000
-addToGenesis = true
-
-  [parachains.collator]
-  name = "collator01"
-  image = "{{COL_IMAGE}}"
-  command = "adder-collator"
-  args = ["-lparachain=debug"]
-
-[[parachains]]
-id = 2001
-cumulus_based = true
-
-  [parachains.collator]
-  name = "collator02"
-  image = "{{CUMULUS_IMAGE}}"
-  command = "polkadot-parachain"
-  args = ["-lparachain=debug"]
-
-[types.Header]
-number = "u64"
-parent_hash = "Hash"
-post_state = "Hash"
\ No newline at end of file
diff --git a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl b/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl
deleted file mode 100644
index a53de784b2d..00000000000
--- a/polkadot/zombienet_tests/functional/0017-sync-backing.zndsl
+++ /dev/null
@@ -1,22 +0,0 @@
-Description: Test we are producing 12-second parachain blocks if sync backing is configured
-Network: ./0017-sync-backing.toml
-Creds: config
-
-# Check authority status.
-alice: reports node_roles is 4
-
-# Ensure parachains are registered.
-alice: parachain 2000 is registered within 60 seconds
-alice: parachain 2001 is registered within 60 seconds
-
-# Ensure parachains made progress.
-alice: reports substrate_block_height{status="finalized"} is at least 10 within 100 seconds
-
-# This parachains should produce blocks at 12s clip, let's assume an 14s rate, allowing for
-# some slots to be missed on slower machines
-alice: parachain 2000 block height is at least 21 within 300 seconds
-alice: parachain 2000 block height is lower than 25 within 2 seconds
-
-# This should already have produced the needed blocks
-alice: parachain 2001 block height is at least 21 within 10 seconds
-alice: parachain 2001 block height is lower than 25 within 2 seconds
-- 
GitLab


From baa3bcc60ddab6a700a713e241ad6599feb046dd Mon Sep 17 00:00:00 2001
From: Ludovic_Domingues <ludovic.domingues96@gmail.com>
Date: Tue, 7 Jan 2025 14:28:28 +0100
Subject: [PATCH 030/116] Fix defensive! macro to be used in umbrella crates
 (#7069)

PR for #7054

Replaced `frame_support` with `$crate`, following @gui1117's suggestion, to fix
the dependency issue

---------

Co-authored-by: command-bot <>
---
 prdoc/pr_7069.prdoc                        | 10 ++++++++++
 substrate/frame/support/src/traits/misc.rs |  6 +++---
 2 files changed, 13 insertions(+), 3 deletions(-)
 create mode 100644 prdoc/pr_7069.prdoc

diff --git a/prdoc/pr_7069.prdoc b/prdoc/pr_7069.prdoc
new file mode 100644
index 00000000000..a0fc5cafb02
--- /dev/null
+++ b/prdoc/pr_7069.prdoc
@@ -0,0 +1,10 @@
+title: Fix defensive! macro to be used in umbrella crates
+doc:
+- audience: Runtime Dev
+  description: |-
+    PR for #7054
+
+    Replaced frame_support with $crate from @gui1117 's suggestion to fix the dependency issue
+crates:
+- name: frame-support
+  bump: patch
diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs
index 0dc3abdce95..9fef4383ad6 100644
--- a/substrate/frame/support/src/traits/misc.rs
+++ b/substrate/frame/support/src/traits/misc.rs
@@ -66,7 +66,7 @@ impl<T: VariantCount> Get<u32> for VariantCountOf<T> {
 #[macro_export]
 macro_rules! defensive {
 	() => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR
@@ -74,7 +74,7 @@ macro_rules! defensive {
 		debug_assert!(false, "{}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR);
 	};
 	($error:expr $(,)?) => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}: {:?}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
@@ -83,7 +83,7 @@ macro_rules! defensive {
 		debug_assert!(false, "{}: {:?}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR, $error);
 	};
 	($error:expr, $proof:expr $(,)?) => {
-		frame_support::__private::log::error!(
+		$crate::__private::log::error!(
 			target: "runtime::defensive",
 			"{}: {:?}: {:?}",
 			$crate::traits::DEFENSIVE_OP_PUBLIC_ERROR,
-- 
GitLab


From f4f56f6cf819472fcbab7ef367ec521f26cb85cb Mon Sep 17 00:00:00 2001
From: wmjae <wenmujia@gmail.com>
Date: Tue, 7 Jan 2025 23:11:42 +0800
Subject: [PATCH 031/116] fix typos (#7068)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Dónal Murray <donalm@seadanda.dev>
Co-authored-by: Dónal Murray <donal.murray@parity.io>
Co-authored-by: Shawn Tabrizi <shawntabrizi@gmail.com>
---
 docs/sdk/src/reference_docs/frame_benchmarking_weight.rs | 2 +-
 substrate/frame/balances/src/impl_currency.rs            | 2 +-
 substrate/frame/benchmarking/src/v1.rs                   | 2 +-
 substrate/frame/elections-phragmen/src/lib.rs            | 2 +-
 substrate/frame/recovery/src/lib.rs                      | 2 +-
 substrate/frame/support/src/storage/child.rs             | 2 +-
 substrate/frame/support/src/storage/unhashed.rs          | 2 +-
 substrate/frame/support/src/traits/preimages.rs          | 2 +-
 8 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
index 68d7d31f67f..98192bfd2a9 100644
--- a/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
+++ b/docs/sdk/src/reference_docs/frame_benchmarking_weight.rs
@@ -96,7 +96,7 @@
 //! Two ways exist to run the benchmarks of a runtime.
 //!
 //! 1. The old school way: Most Polkadot-SDK based nodes (such as the ones integrated in
-//!    [`templates`]) have an a `benchmark` subcommand integrated into themselves.
+//!    [`templates`]) have a `benchmark` subcommand integrated into themselves.
 //! 2. The more [`crate::reference_docs::omni_node`] compatible way of running the benchmarks would
 //!    be using [`frame-omni-bencher`] CLI, which only relies on a runtime.
 //!
diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs
index 23feb46b72c..bc7e77c191d 100644
--- a/substrate/frame/balances/src/impl_currency.rs
+++ b/substrate/frame/balances/src/impl_currency.rs
@@ -632,7 +632,7 @@ where
 	///
 	/// This is `Polite` and thus will not repatriate any funds which would lead the total balance
 	/// to be less than the frozen amount. Returns `Ok` with the actual amount of funds moved,
-	/// which may be less than `value` since the operation is done an a `BestEffort` basis.
+	/// which may be less than `value` since the operation is done on a `BestEffort` basis.
 	fn repatriate_reserved(
 		slashed: &T::AccountId,
 		beneficiary: &T::AccountId,
diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs
index 64f93b22cf1..99aad0301c1 100644
--- a/substrate/frame/benchmarking/src/v1.rs
+++ b/substrate/frame/benchmarking/src/v1.rs
@@ -1894,7 +1894,7 @@ macro_rules! add_benchmark {
 /// This macro allows users to easily generate a list of benchmarks for the pallets configured
 /// in the runtime.
 ///
-/// To use this macro, first create a an object to store the list:
+/// To use this macro, first create an object to store the list:
 ///
 /// ```ignore
 /// let mut list = Vec::<BenchmarkList>::new();
diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs
index effbb6e786c..fa1c48ee65e 100644
--- a/substrate/frame/elections-phragmen/src/lib.rs
+++ b/substrate/frame/elections-phragmen/src/lib.rs
@@ -616,7 +616,7 @@ pub mod pallet {
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
 	pub enum Event<T: Config> {
 		/// A new term with new_members. This indicates that enough candidates existed to run
-		/// the election, not that enough have has been elected. The inner value must be examined
+		/// the election, not that enough have been elected. The inner value must be examined
 		/// for this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond
 		/// slashed and none were elected, whilst `EmptyTerm` means that no candidates existed to
 		/// begin with.
diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs
index 4de1919cdc3..5a97b03cd23 100644
--- a/substrate/frame/recovery/src/lib.rs
+++ b/substrate/frame/recovery/src/lib.rs
@@ -403,7 +403,7 @@ pub mod pallet {
 				.map_err(|e| e.error)
 		}
 
-		/// Allow ROOT to bypass the recovery process and set an a rescuer account
+		/// Allow ROOT to bypass the recovery process and set a rescuer account
 		/// for a lost account directly.
 		///
 		/// The dispatch origin for this call must be _ROOT_.
diff --git a/substrate/frame/support/src/storage/child.rs b/substrate/frame/support/src/storage/child.rs
index 5ebba269365..7109e9213b0 100644
--- a/substrate/frame/support/src/storage/child.rs
+++ b/substrate/frame/support/src/storage/child.rs
@@ -163,7 +163,7 @@ pub fn kill_storage(child_info: &ChildInfo, limit: Option<u32>) -> KillStorageRe
 /// operating on the same prefix should pass `Some` and this value should be equal to the
 /// previous call result's `maybe_cursor` field. The only exception to this is when you can
 /// guarantee that the subsequent call is in a new block; in this case the previous call's result
-/// cursor need not be passed in an a `None` may be passed instead. This exception may be useful
+/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful
 /// then making this call solely from a block-hook such as `on_initialize`.
 
 /// Returns [`MultiRemovalResults`] to inform about the result. Once the resultant `maybe_cursor`
diff --git a/substrate/frame/support/src/storage/unhashed.rs b/substrate/frame/support/src/storage/unhashed.rs
index 7f9bc93d7d8..495c50caa2d 100644
--- a/substrate/frame/support/src/storage/unhashed.rs
+++ b/substrate/frame/support/src/storage/unhashed.rs
@@ -124,7 +124,7 @@ pub fn kill_prefix(prefix: &[u8], limit: Option<u32>) -> sp_io::KillStorageResul
 /// operating on the same prefix should pass `Some` and this value should be equal to the
 /// previous call result's `maybe_cursor` field. The only exception to this is when you can
 /// guarantee that the subsequent call is in a new block; in this case the previous call's result
-/// cursor need not be passed in an a `None` may be passed instead. This exception may be useful
+/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful
 /// then making this call solely from a block-hook such as `on_initialize`.
 ///
 /// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the
diff --git a/substrate/frame/support/src/traits/preimages.rs b/substrate/frame/support/src/traits/preimages.rs
index 80020d8d008..6e46a748965 100644
--- a/substrate/frame/support/src/traits/preimages.rs
+++ b/substrate/frame/support/src/traits/preimages.rs
@@ -38,7 +38,7 @@ pub enum Bounded<T, H: Hash> {
 	/// for transitioning from legacy state. In the future we will make this a pure
 	/// `Dummy` item storing only the final `dummy` field.
 	Legacy { hash: H::Output, dummy: core::marker::PhantomData<T> },
-	/// A an bounded `Call`. Its encoding must be at most 128 bytes.
+	/// A bounded `Call`. Its encoding must be at most 128 bytes.
 	Inline(BoundedInline),
 	/// A hash of the call together with an upper limit for its size.`
 	Lookup { hash: H::Output, len: u32 },
-- 
GitLab


From a5780527041e39268fc8b05b0f3d098cde204883 Mon Sep 17 00:00:00 2001
From: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
Date: Tue, 7 Jan 2025 17:25:16 +0200
Subject: [PATCH 032/116] release: unset SKIP_WASM_BUILD (#7074)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Description

Seems like I added `SKIP_WASM_BUILD=1` 💀 for aarch64 binaries, which
results in various errors like:
https://github.com/paritytech/polkadot-sdk/issues/6966. This PR unsets
the variable.

Closes #6966.

## Integration

People who found workarounds as in #6966 can consume the fixed binaries
again.

## Review Notes

I introduced SKIP_WASM_BUILD=1 for some reason for aarch64 (probably to
speed up testing) and forgot to remove it. It slipped through and
interfered with `stable2412` release artifacts. Needs backporting to
`stable2412` and then rebuilding/overwriting the aarch64 artifacts.

---------

Signed-off-by: Iulian Barbu <iulian.barbu@parity.io>
---
 .github/workflows/release-reusable-rc-buid.yml |  1 -
 prdoc/pr_7074.prdoc                            | 13 +++++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_7074.prdoc

diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml
index 0222b2aa91e..035b547603e 100644
--- a/.github/workflows/release-reusable-rc-buid.yml
+++ b/.github/workflows/release-reusable-rc-buid.yml
@@ -149,7 +149,6 @@ jobs:
       AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
       AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
-      SKIP_WASM_BUILD: 1
     steps:
       - name: Checkout sources
         uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
diff --git a/prdoc/pr_7074.prdoc b/prdoc/pr_7074.prdoc
new file mode 100644
index 00000000000..d49e5f8d831
--- /dev/null
+++ b/prdoc/pr_7074.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Unset SKIP_WASM_BUILD=1 for aarch64 binaries release 
+
+doc:
+  - audience: [ Node Dev, Runtime Dev]
+    description:
+      Fix the release pipeline environment by unsetting SKIP_WASM_BUILD=1
+      so that aarch64 binaries are built so that they contain runtimes
+      accordingly.
+
+crates: [ ]
-- 
GitLab


From 645878a27115db52e5d63115699b4bbb89034067 Mon Sep 17 00:00:00 2001
From: Ludovic_Domingues <ludovic.domingues96@gmail.com>
Date: Tue, 7 Jan 2025 18:17:10 +0100
Subject: [PATCH 033/116] adding warning when using default substrateWeight in
 production (#7046)

PR for #3581
Added a cfg_attr to show a deprecation warning message when the `std` feature is not enabled

---------

Co-authored-by: command-bot <>
Co-authored-by: Adrian Catangiu <adrian@parity.io>
---
 prdoc/pr_7046.prdoc                                 | 7 +++++++
 templates/parachain/pallets/template/src/weights.rs | 6 ++++++
 2 files changed, 13 insertions(+)
 create mode 100644 prdoc/pr_7046.prdoc

diff --git a/prdoc/pr_7046.prdoc b/prdoc/pr_7046.prdoc
new file mode 100644
index 00000000000..113cc9c7aac
--- /dev/null
+++ b/prdoc/pr_7046.prdoc
@@ -0,0 +1,7 @@
+title: adding warning when using default substrateWeight in production
+doc:
+- audience: Runtime Dev
+  description: |-
+    PR for #3581
+    Added a cfg to show a deprecated warning message when using std
+crates: []
diff --git a/templates/parachain/pallets/template/src/weights.rs b/templates/parachain/pallets/template/src/weights.rs
index 9295492bc20..4d6dd5642a1 100644
--- a/templates/parachain/pallets/template/src/weights.rs
+++ b/templates/parachain/pallets/template/src/weights.rs
@@ -39,6 +39,12 @@ pub trait WeightInfo {
 }
 
 /// Weights for pallet_template using the Substrate node and recommended hardware.
+#[cfg_attr(
+    not(feature = "std"),
+    deprecated(
+        note = "SubstrateWeight is auto-generated and should not be used in production. Replace it with runtime benchmarked weights."
+    )
+)]
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Storage: Template Something (r:0 w:1)
-- 
GitLab


From 4059282fc7b6ec965cc22a9a0df5920a4f3a4101 Mon Sep 17 00:00:00 2001
From: Alistair Singh <alistair.singh7@gmail.com>
Date: Tue, 7 Jan 2025 23:23:45 +0200
Subject: [PATCH 034/116] Snowbridge: Support bridging native ETH (#6855)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Changes:
1. Use the 0x0000000000000000000000000000000000000000 token address as
Native ETH.
2. Convert it to/from `{ parents: 2, interior:
X1(GlobalConsensus(Ethereum{chain_id: 1})) }` when encountered.

Onchain changes:
This will require a governance request to register native ETH (with the
above location) in the foreign assets pallet and make it sufficient.

Related solidity changes:
https://github.com/Snowfork/snowbridge/pull/1354

TODO:
- [x] Emulated Tests

---------

Co-authored-by: Vincent Geddes <117534+vgeddes@users.noreply.github.com>
Co-authored-by: Bastian Köcher <git@kchr.de>
Co-authored-by: Bastian Köcher <info@kchr.de>
---
 .../pallets/inbound-queue/fixtures/src/lib.rs |   1 +
 .../fixtures/src/send_native_eth.rs           |  95 +++++++++
 .../primitives/router/src/inbound/mock.rs     |  48 +++++
 .../primitives/router/src/inbound/mod.rs      |  16 +-
 .../primitives/router/src/inbound/tests.rs    |  88 ++++++--
 .../primitives/router/src/outbound/mod.rs     |   5 +
 .../primitives/router/src/outbound/tests.rs   |  40 ++++
 .../bridges/bridge-hub-rococo/src/lib.rs      |   3 +-
 .../bridges/bridge-hub-rococo/src/lib.rs      |   1 +
 .../bridge-hub-rococo/src/tests/snowbridge.rs | 196 ++++++++++++++++--
 prdoc/pr_6855.prdoc                           |  16 ++
 11 files changed, 478 insertions(+), 31 deletions(-)
 create mode 100755 bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs
 create mode 100644 bridges/snowbridge/primitives/router/src/inbound/mock.rs
 create mode 100644 prdoc/pr_6855.prdoc

diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs
index 00adcdfa186..cb4232376c6 100644
--- a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs
+++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/lib.rs
@@ -3,5 +3,6 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 pub mod register_token;
+pub mod send_native_eth;
 pub mod send_token;
 pub mod send_token_to_penpal;
diff --git a/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs
new file mode 100755
index 00000000000..d3e8d76e6b3
--- /dev/null
+++ b/bridges/snowbridge/pallets/inbound-queue/fixtures/src/send_native_eth.rs
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 Snowfork <hello@snowfork.com>
+// Generated, do not edit!
+// See ethereum client README.md for instructions to generate
+
+use hex_literal::hex;
+use snowbridge_beacon_primitives::{
+	types::deneb, AncestryProof, BeaconHeader, ExecutionProof, VersionedExecutionPayloadHeader,
+};
+use snowbridge_core::inbound::{InboundQueueFixture, Log, Message, Proof};
+use sp_core::U256;
+use sp_std::vec;
+
+pub fn make_send_native_eth_message() -> InboundQueueFixture {
+	InboundQueueFixture {
+        message: Message {
+            event_log: 	Log {
+                address: hex!("87d1f7fdfee7f651fabc8bfcb6e086c278b77a7d").into(),
+                topics: vec![
+                    hex!("7153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84f").into(),
+                    hex!("c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539").into(),
+                    hex!("5f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0").into(),
+                ],
+                data: hex!("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa0000000000010000000000000000000000000000000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e8764817000000000000000000000000").into(),
+            },
+            proof: Proof {
+                receipt_proof: (vec![
+                    hex!("17cd4d05dde30703008a4f213205923630cff8e6bc9d5d95a52716bfb5551fd7").to_vec(),
+                ], vec![
+                    hex!("f903b4822080b903ae02f903aa018301a7fcb9010000000000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000000000080000000000020000000000000000000800010100000000000000000000000000000000000200000000000000000000000000001000000040080008000000000000000000040000000021000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000200000000000000f9029ff9015d9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df884a024c5d2de620c6e25186ae16f6919eba93b6e2c1a33857cc419d9f3a00d6967e9a00000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000090a987b944cb1dcce5564e5fdecd7a54d3de27fea000000000000000000000000000000000000000000000000000000000000003e8b8c000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000208eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48f9013c9487d1f7fdfee7f651fabc8bfcb6e086c278b77a7df863a07153f9357c8ea496bba60bf82e67143e27b64462b49041f8e689e1b05728f84fa0c173fac324158e77fb5840738a1a541f633cbec8884c6a601c567d2b376a0539a05f7060e971b0dc81e63f0aa41831091847d97c1a4693ac450cc128c7214e65e0b8c000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005f00a736aa0000000000010000000000000000000000000000000000000000008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48000064a7b3b6e00d000000000000000000e8764817000000000000000000000000").to_vec(),
+                ]),
+                execution_proof: ExecutionProof {
+                    header: BeaconHeader {
+                        slot: 246,
+                        proposer_index: 7,
+                        parent_root: hex!("4faaac5d2fa0b8884fe1175c7cac1c92aac9eba5a20b4302edb98a56428c5974").into(),
+                        state_root: hex!("882c13f1d56df781e3444a78cae565bfa1c89822c86cdb0daea71f5351231580").into(),
+                        body_root: hex!("c47eb72204b1ca567396dacef8b0214027eb7f0789330b55166085d1f9cb4c65").into(),
+                    },
+                        ancestry_proof: Some(AncestryProof {
+                        header_branch: vec![
+                            hex!("38e2454bc93c4cfafcea772b8531e4802bbd2561366620699096dd4e591bc488").into(),
+                            hex!("3d7389fb144ccaeca8b8e1667ce1d1538dfceb50bf1e49c4b368a223f051fda3").into(),
+                            hex!("0d49c9c24137ad4d86ebca2f36a159573a68b5d5d60e317776c77cc8b6093034").into(),
+                            hex!("0fadc6735bcdc2793a5039a806fbf39984c39374ed4d272c1147e1c23df88983").into(),
+                            hex!("3a058ad4b169eebb4c754c8488d41e56a7a0e5f8b55b5ec67452a8d326585c69").into(),
+                            hex!("de200426caa9bc03f8e0033b4ef4df1db6501924b5c10fb7867e76db942b903c").into(),
+                            hex!("48b578632bc40eebb517501f179ffdd06d762c03e9383df16fc651eeddd18806").into(),
+                            hex!("98d9d6904b2a6a285db4c4ae59a07100cd38ec4d9fb7a16a10fe83ec99e6ba1d").into(),
+                            hex!("1b2bbae6e684864b714654a60778664e63ba6c3c9bed8074ec1a0380fe5042e6").into(),
+                            hex!("eb907a888eadf5a7e2bd0a3a5a9369e409c7aa688bd4cde758d5b608c6c82785").into(),
+                            hex!("ffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b").into(),
+                            hex!("6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220").into(),
+                            hex!("b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f").into(),
+                        ],
+                        finalized_block_root: hex!("440615588532ce496a93d189cb0ef1df7cf67d529faee0fd03213ce26ea115e5").into(),
+                        }),
+                    execution_header: VersionedExecutionPayloadHeader::Deneb(deneb::ExecutionPayloadHeader {
+                        parent_hash: hex!("a8c89213b7d7d2ac76462d89e6a7384374db905b657ad803d3c86f88f86c39df").into(),
+                        fee_recipient: hex!("0000000000000000000000000000000000000000").into(),
+                        state_root: hex!("a1e8175213a6a43da17fae65109245867cbc60e3ada16b8ac28c6b208761c772").into(),
+                        receipts_root: hex!("17cd4d05dde30703008a4f213205923630cff8e6bc9d5d95a52716bfb5551fd7").into(),
+                        logs_bloom: hex!("00000000000000000000000020000000000000000000004000000000000000000400000000000000000000001000000000000000000000000000000000000000000000000000000001080000000000000000000000000000000000000000080000000000020000000000000000000800010100000000000000000000000000000000000200000000000000000000000000001000000040080008000000000000000000040000000021000000002000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000220000000000000000000000000000000000000000000000000000200000000000000").into(),
+                        prev_randao: hex!("b9b26dc14ea8c57d069fde0c94ad31c2558365c3986a0c06558470f8c02e62ce").into(),
+                        block_number: 246,
+                        gas_limit: 62908420,
+                        gas_used: 108540,
+                        timestamp: 1734718384,
+                        extra_data: hex!("d983010e08846765746888676f312e32322e358664617277696e").into(),
+                        base_fee_per_gas: U256::from(7u64),
+                        block_hash: hex!("878195e2ea83c74d475363d03d41a7fbfc4026d6e5bcffb713928253984a64a7").into(),
+                        transactions_root: hex!("909139b3137666b4551b629ce6d9fb7e5e6f6def8a48d078448ec6600fe63c7f").into(),
+                        withdrawals_root: hex!("792930bbd5baac43bcc798ee49aa8185ef76bb3b44ba62b91d86ae569e4bb535").into(),
+                        blob_gas_used: 0,
+                        excess_blob_gas: 0,
+                    }),
+                    execution_branch: vec![
+                            hex!("5d78e26ea639df17c2194ff925f782b9522009d58cfc60e3d34ba79a19f8faf1").into(),
+                            hex!("b46f0c01805fe212e15907981b757e6c496b0cb06664224655613dcec82505bb").into(),
+                            hex!("db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71").into(),
+                            hex!("3d84b2809a36450186e5169995a5e3cab55d751aee90fd8456b33d871ccaa463").into(),
+                    ],
+                }
+            },
+        },
+        finalized_header: BeaconHeader {
+            slot: 608,
+            proposer_index: 3,
+            parent_root: hex!("f10c2349530dbd339a72886270e2e304bb68155af68c918c850acd9ab341350f").into(),
+            state_root: hex!("6df0ef4cbb4986a84ff0763727402b88636e6b5535022cd3ad6967b8dd799402").into(),
+            body_root: hex!("f66fc1c022f07f91c777ad5c464625fc0b43d3e7a45650567dce60011210f574").into(),
+        },
+        block_roots_root: hex!("1c0dbf54db070770f5e573b72afe0aac2b0e3cf312107d1cd73bf64d7a2ed90c").into(),
+    }
+}
diff --git a/bridges/snowbridge/primitives/router/src/inbound/mock.rs b/bridges/snowbridge/primitives/router/src/inbound/mock.rs
new file mode 100644
index 00000000000..537853b324f
--- /dev/null
+++ b/bridges/snowbridge/primitives/router/src/inbound/mock.rs
@@ -0,0 +1,48 @@
+use crate::inbound::{MessageToXcm, TokenId};
+use frame_support::parameter_types;
+use sp_runtime::{
+	traits::{IdentifyAccount, MaybeEquivalence, Verify},
+	MultiSignature,
+};
+use xcm::{latest::WESTEND_GENESIS_HASH, prelude::*};
+
+pub const CHAIN_ID: u64 = 11155111;
+pub const NETWORK: NetworkId = Ethereum { chain_id: CHAIN_ID };
+
+parameter_types! {
+	pub EthereumNetwork: NetworkId = NETWORK;
+
+	pub const CreateAssetCall: [u8;2] = [53, 0];
+	pub const CreateAssetExecutionFee: u128 = 2_000_000_000;
+	pub const CreateAssetDeposit: u128 = 100_000_000_000;
+	pub const SendTokenExecutionFee: u128 = 1_000_000_000;
+	pub const InboundQueuePalletInstance: u8 = 80;
+	pub UniversalLocation: InteriorLocation =
+		[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)), Parachain(1002)].into();
+	pub AssetHubFromEthereum: Location = Location::new(1,[GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),Parachain(1000)]);
+}
+
+type Signature = MultiSignature;
+type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
+type Balance = u128;
+
+pub(crate) struct MockTokenIdConvert;
+impl MaybeEquivalence<TokenId, Location> for MockTokenIdConvert {
+	fn convert(_id: &TokenId) -> Option<Location> {
+		Some(Location::parent())
+	}
+	fn convert_back(_loc: &Location) -> Option<TokenId> {
+		None
+	}
+}
+
+pub(crate) type MessageConverter = MessageToXcm<
+	CreateAssetCall,
+	CreateAssetDeposit,
+	InboundQueuePalletInstance,
+	AccountId,
+	Balance,
+	MockTokenIdConvert,
+	UniversalLocation,
+	AssetHubFromEthereum,
+>;
diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
index bc5d401cd4f..1c210afb1f7 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
@@ -2,6 +2,8 @@
 // SPDX-FileCopyrightText: 2023 Snowfork <hello@snowfork.com>
 //! Converts messages from Ethereum to XCM messages
 
+#[cfg(test)]
+mod mock;
 #[cfg(test)]
 mod tests;
 
@@ -394,10 +396,16 @@ where
 
 	// Convert ERC20 token address to a location that can be understood by Assets Hub.
 	fn convert_token_address(network: NetworkId, token: H160) -> Location {
-		Location::new(
-			2,
-			[GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }],
-		)
+		// If the token is `0x0000000000000000000000000000000000000000` then return the location of
+		// native Ether.
+		if token == H160([0; 20]) {
+			Location::new(2, [GlobalConsensus(network)])
+		} else {
+			Location::new(
+				2,
+				[GlobalConsensus(network), AccountKey20 { network: None, key: token.into() }],
+			)
+		}
 	}
 
 	/// Constructs an XCM message destined for AssetHub that withdraws assets from the sovereign
diff --git a/bridges/snowbridge/primitives/router/src/inbound/tests.rs b/bridges/snowbridge/primitives/router/src/inbound/tests.rs
index 786aa594f65..11d7928602c 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/tests.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/tests.rs
@@ -1,21 +1,12 @@
 use super::EthereumLocationsConverterFor;
-use crate::inbound::CallIndex;
-use frame_support::{assert_ok, parameter_types};
+use crate::inbound::{
+	mock::*, Command, ConvertMessage, Destination, MessageV1, VersionedMessage, H160,
+};
+use frame_support::assert_ok;
 use hex_literal::hex;
 use xcm::prelude::*;
 use xcm_executor::traits::ConvertLocation;
 
-const NETWORK: NetworkId = Ethereum { chain_id: 11155111 };
-
-parameter_types! {
-	pub EthereumNetwork: NetworkId = NETWORK;
-
-	pub const CreateAssetCall: CallIndex = [1, 1];
-	pub const CreateAssetExecutionFee: u128 = 123;
-	pub const CreateAssetDeposit: u128 = 891;
-	pub const SendTokenExecutionFee: u128 = 592;
-}
-
 #[test]
 fn test_ethereum_network_converts_successfully() {
 	let expected_account: [u8; 32] =
@@ -81,3 +72,74 @@ fn test_reanchor_all_assets() {
 		assert_eq!(reanchored_asset_with_ethereum_context, asset.clone());
 	}
 }
+
+#[test]
+fn test_convert_send_token_with_weth() {
+	const WETH: H160 = H160([0xff; 20]);
+	const AMOUNT: u128 = 1_000_000;
+	const FEE: u128 = 1_000;
+	const ACCOUNT_ID: [u8; 32] = [0xBA; 32];
+	const MESSAGE: VersionedMessage = VersionedMessage::V1(MessageV1 {
+		chain_id: CHAIN_ID,
+		command: Command::SendToken {
+			token: WETH,
+			destination: Destination::AccountId32 { id: ACCOUNT_ID },
+			amount: AMOUNT,
+			fee: FEE,
+		},
+	});
+	let result = MessageConverter::convert([1; 32].into(), MESSAGE);
+	assert_ok!(&result);
+	let (xcm, fee) = result.unwrap();
+	assert_eq!(FEE, fee);
+
+	let expected_assets = ReserveAssetDeposited(
+		vec![Asset {
+			id: AssetId(Location {
+				parents: 2,
+				interior: Junctions::X2(
+					[GlobalConsensus(NETWORK), AccountKey20 { network: None, key: WETH.into() }]
+						.into(),
+				),
+			}),
+			fun: Fungible(AMOUNT),
+		}]
+		.into(),
+	);
+	let actual_assets = xcm.into_iter().find(|x| matches!(x, ReserveAssetDeposited(..)));
+	assert_eq!(actual_assets, Some(expected_assets))
+}
+
+#[test]
+fn test_convert_send_token_with_eth() {
+	const ETH: H160 = H160([0x00; 20]);
+	const AMOUNT: u128 = 1_000_000;
+	const FEE: u128 = 1_000;
+	const ACCOUNT_ID: [u8; 32] = [0xBA; 32];
+	const MESSAGE: VersionedMessage = VersionedMessage::V1(MessageV1 {
+		chain_id: CHAIN_ID,
+		command: Command::SendToken {
+			token: ETH,
+			destination: Destination::AccountId32 { id: ACCOUNT_ID },
+			amount: AMOUNT,
+			fee: FEE,
+		},
+	});
+	let result = MessageConverter::convert([1; 32].into(), MESSAGE);
+	assert_ok!(&result);
+	let (xcm, fee) = result.unwrap();
+	assert_eq!(FEE, fee);
+
+	let expected_assets = ReserveAssetDeposited(
+		vec![Asset {
+			id: AssetId(Location {
+				parents: 2,
+				interior: Junctions::X1([GlobalConsensus(NETWORK)].into()),
+			}),
+			fun: Fungible(AMOUNT),
+		}]
+		.into(),
+	);
+	let actual_assets = xcm.into_iter().find(|x| matches!(x, ReserveAssetDeposited(..)));
+	assert_eq!(actual_assets, Some(expected_assets))
+}
diff --git a/bridges/snowbridge/primitives/router/src/outbound/mod.rs b/bridges/snowbridge/primitives/router/src/outbound/mod.rs
index 3b5dbdb77c8..622c4080701 100644
--- a/bridges/snowbridge/primitives/router/src/outbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/outbound/mod.rs
@@ -289,8 +289,13 @@ where
 		let (token, amount) = match reserve_asset {
 			Asset { id: AssetId(inner_location), fun: Fungible(amount) } =>
 				match inner_location.unpack() {
+					// Get the ERC20 contract address of the token.
 					(0, [AccountKey20 { network, key }]) if self.network_matches(network) =>
 						Some((H160(*key), *amount)),
+					// If there is no ERC20 contract address in the location then signal to the
+					// gateway that it is a native Ether transfer by using
+					// `0x0000000000000000000000000000000000000000` as the token address.
+					(0, []) => Some((H160([0; 20]), *amount)),
 					_ => None,
 				},
 			_ => None,
diff --git a/bridges/snowbridge/primitives/router/src/outbound/tests.rs b/bridges/snowbridge/primitives/router/src/outbound/tests.rs
index 44f81ce31b3..2a60f9f3e0e 100644
--- a/bridges/snowbridge/primitives/router/src/outbound/tests.rs
+++ b/bridges/snowbridge/primitives/router/src/outbound/tests.rs
@@ -515,6 +515,46 @@ fn xcm_converter_convert_with_wildcard_all_asset_filter_succeeds() {
 	assert_eq!(result, Ok((expected_payload, [0; 32])));
 }
 
+#[test]
+fn xcm_converter_convert_with_native_eth_succeeds() {
+	let network = BridgedNetwork::get();
+
+	let beneficiary_address: [u8; 20] = hex!("2000000000000000000000000000000000000000");
+
+	// The asset is `{ parents: 0, interior: X1(Here) }` relative to ethereum.
+	let assets: Assets = vec![Asset { id: AssetId([].into()), fun: Fungible(1000) }].into();
+	let filter: AssetFilter = Wild(All);
+
+	let message: Xcm<()> = vec![
+		WithdrawAsset(assets.clone()),
+		ClearOrigin,
+		BuyExecution { fees: assets.get(0).unwrap().clone(), weight_limit: Unlimited },
+		DepositAsset {
+			assets: filter,
+			beneficiary: AccountKey20 { network: None, key: beneficiary_address }.into(),
+		},
+		SetTopic([0; 32]),
+	]
+	.into();
+
+	let mut converter =
+		XcmConverter::<MockTokenIdConvert, ()>::new(&message, network, Default::default());
+
+	// The token address that is expected to be sent should be
+	// `0x0000000000000000000000000000000000000000`. The Solidity contract will
+	// interpret this as a transfer of ETH.
+	let expected_payload = Command::AgentExecute {
+		agent_id: Default::default(),
+		command: AgentExecuteCommand::TransferToken {
+			token: H160([0; 20]),
+			recipient: beneficiary_address.into(),
+			amount: 1000,
+		},
+	};
+	let result = converter.convert();
+	assert_eq!(result, Ok((expected_payload, [0; 32])));
+}
+
 #[test]
 fn xcm_converter_convert_with_fees_less_than_reserve_yields_success() {
 	let network = BridgedNetwork::get();
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs
index 5ef0993f70a..43398eb8bd4 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs
@@ -16,7 +16,8 @@
 pub mod genesis;
 
 pub use bridge_hub_rococo_runtime::{
-	xcm_config::XcmConfig as BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue,
+	self as bridge_hub_rococo_runtime, xcm_config::XcmConfig as BridgeHubRococoXcmConfig,
+	EthereumBeaconClient, EthereumInboundQueue,
 	ExistentialDeposit as BridgeHubRococoExistentialDeposit,
 	RuntimeOrigin as BridgeHubRococoRuntimeOrigin,
 };
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
index 54bc395c86f..f84d42cb29f 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs
@@ -50,6 +50,7 @@ mod imports {
 			AssetHubWestendParaPallet as AssetHubWestendPallet,
 		},
 		bridge_hub_rococo_emulated_chain::{
+			bridge_hub_rococo_runtime::bridge_to_ethereum_config::EthereumGatewayAddress,
 			genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoExistentialDeposit,
 			BridgeHubRococoParaPallet as BridgeHubRococoPallet, BridgeHubRococoRuntimeOrigin,
 			BridgeHubRococoXcmConfig, EthereumBeaconClient, EthereumInboundQueue,
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
index c72d5045ddc..6364ff9fe95 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs
@@ -20,8 +20,8 @@ use hex_literal::hex;
 use rococo_westend_system_emulated_network::BridgeHubRococoParaSender as BridgeHubRococoSender;
 use snowbridge_core::{inbound::InboundQueueFixture, outbound::OperatingMode};
 use snowbridge_pallet_inbound_queue_fixtures::{
-	register_token::make_register_token_message, send_token::make_send_token_message,
-	send_token_to_penpal::make_send_token_to_penpal_message,
+	register_token::make_register_token_message, send_native_eth::make_send_native_eth_message,
+	send_token::make_send_token_message, send_token_to_penpal::make_send_token_to_penpal_message,
 };
 use snowbridge_pallet_system;
 use snowbridge_router_primitives::inbound::{
@@ -238,7 +238,7 @@ fn register_weth_token_from_ethereum_to_asset_hub() {
 /// Tests the registering of a token as an asset on AssetHub, and then subsequently sending
 /// a token from Ethereum to AssetHub.
 #[test]
-fn send_token_from_ethereum_to_asset_hub() {
+fn send_weth_token_from_ethereum_to_asset_hub() {
 	BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id().into(), INITIAL_FUND);
 
 	// Fund ethereum sovereign on AssetHub
@@ -278,7 +278,7 @@ fn send_token_from_ethereum_to_asset_hub() {
 /// Tests sending a token to a 3rd party parachain, called PenPal. The token reserve is
 /// still located on AssetHub.
 #[test]
-fn send_token_from_ethereum_to_penpal() {
+fn send_weth_from_ethereum_to_penpal() {
 	let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new(
 		1,
 		[Parachain(AssetHubRococo::para_id().into())],
@@ -515,6 +515,176 @@ fn send_weth_asset_from_asset_hub_to_ethereum() {
 	});
 }
 
+/// Tests the full cycle of eth transfers:
+/// - sending a token to AssetHub
+/// - returning the token to Ethereum
+#[test]
+fn send_eth_asset_from_asset_hub_to_ethereum_and_back() {
+	let ethereum_network: NetworkId = EthereumNetwork::get().into();
+	let origin_location = (Parent, Parent, ethereum_network).into();
+
+	use ahr_xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee;
+	let assethub_location = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id());
+	let assethub_sovereign = BridgeHubRococo::sovereign_account_id_of(assethub_location);
+	let ethereum_sovereign: AccountId =
+		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+
+	AssetHubRococo::force_default_xcm_version(Some(XCM_VERSION));
+	BridgeHubRococo::force_default_xcm_version(Some(XCM_VERSION));
+	AssetHubRococo::force_xcm_version(origin_location.clone(), XCM_VERSION);
+
+	BridgeHubRococo::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]);
+	AssetHubRococo::fund_accounts(vec![
+		(AssetHubRococoReceiver::get(), INITIAL_FUND),
+		(ethereum_sovereign.clone(), INITIAL_FUND),
+	]);
+
+	// Register ETH
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <AssetHubRococo as Chain>::RuntimeOrigin;
+		assert_ok!(<AssetHubRococo as AssetHubRococoPallet>::ForeignAssets::force_create(
+			RuntimeOrigin::root(),
+			origin_location.clone(),
+			ethereum_sovereign.into(),
+			true,
+			1000,
+		));
+
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::ForceCreated { .. }) => {},
+			]
+		);
+	});
+	const ETH_AMOUNT: u128 = 1_000_000_000_000_000_000;
+
+	BridgeHubRococo::execute_with(|| {
+		type RuntimeEvent = <BridgeHubRococo as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <BridgeHubRococo as Chain>::RuntimeOrigin;
+
+		// Set the gateway. This is needed because new fixtures use a different gateway address.
+		assert_ok!(<BridgeHubRococo as Chain>::System::set_storage(
+			RuntimeOrigin::root(),
+			vec![(
+				EthereumGatewayAddress::key().to_vec(),
+				sp_core::H160(hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d")).encode(),
+			)],
+		));
+
+		// Construct the SendToken message and send it to the inbound queue
+		assert_ok!(send_inbound_message(make_send_native_eth_message()));
+
+		// Check that the send token message was sent using xcm
+		assert_expected_events!(
+			BridgeHubRococo,
+			vec![
+				RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},
+			]
+		);
+	});
+
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <AssetHubRococo as Chain>::RuntimeOrigin;
+
+		let _issued_event = RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued {
+			asset_id: origin_location.clone(),
+			owner: AssetHubRococoReceiver::get().into(),
+			amount: ETH_AMOUNT,
+		});
+		// Check that AssetHub has issued the foreign asset
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				_issued_event => {},
+			]
+		);
+		let assets =
+			vec![Asset { id: AssetId(origin_location.clone()), fun: Fungible(ETH_AMOUNT) }];
+		let multi_assets = VersionedAssets::from(Assets::from(assets));
+
+		let destination = origin_location.clone().into();
+
+		let beneficiary = VersionedLocation::from(Location::new(
+			0,
+			[AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }],
+		));
+
+		let free_balance_before = <AssetHubRococo as AssetHubRococoPallet>::Balances::free_balance(
+			AssetHubRococoReceiver::get(),
+		);
+		// Send the ETH back to Ethereum
+		<AssetHubRococo as AssetHubRococoPallet>::PolkadotXcm::limited_reserve_transfer_assets(
+			RuntimeOrigin::signed(AssetHubRococoReceiver::get()),
+			Box::new(destination),
+			Box::new(beneficiary),
+			Box::new(multi_assets),
+			0,
+			Unlimited,
+		)
+		.unwrap();
+
+		let _burned_event = RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned {
+			asset_id: origin_location.clone(),
+			owner: AssetHubRococoReceiver::get().into(),
+			balance: ETH_AMOUNT,
+		});
+		// Check that AssetHub has burned the foreign asset
+		let _destination = origin_location.clone();
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				_burned_event => {},
+				RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent {
+					destination: _destination, ..
+				}) => {},
+			]
+		);
+
+		let free_balance_after = <AssetHubRococo as AssetHubRococoPallet>::Balances::free_balance(
+			AssetHubRococoReceiver::get(),
+		);
+		// Assert at least DefaultBridgeHubEthereumBaseFee charged from the sender
+		let free_balance_diff = free_balance_before - free_balance_after;
+		assert!(free_balance_diff > DefaultBridgeHubEthereumBaseFee::get());
+	});
+
+	BridgeHubRococo::execute_with(|| {
+		type RuntimeEvent = <BridgeHubRococo as Chain>::RuntimeEvent;
+		// Check that the transfer token back to Ethereum message was queued in the Ethereum
+		// Outbound Queue
+		assert_expected_events!(
+			BridgeHubRococo,
+			vec![
+				RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageAccepted {..}) => {},
+				RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued {..}) => {},
+			]
+		);
+
+		let events = BridgeHubRococo::events();
+		// Check that the local fee was credited to the Snowbridge sovereign account
+		assert!(
+			events.iter().any(|event| matches!(
+				event,
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount })
+					if *who == TREASURY_ACCOUNT.into() && *amount == 16903333
+			)),
+			"Snowbridge sovereign takes local fee."
+		);
+		// Check that the remote fee was credited to the AssetHub sovereign account
+		assert!(
+			events.iter().any(|event| matches!(
+				event,
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount })
+					if *who == assethub_sovereign && *amount == 2680000000000,
+			)),
+			"AssetHub sovereign takes remote fee."
+		);
+	});
+}
+
 #[test]
 fn send_token_from_ethereum_to_asset_hub_fail_for_insufficient_fund() {
 	// Insufficient fund
@@ -565,7 +735,7 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() {
 	});
 }
 
-fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) {
+fn send_weth_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) {
 	let ethereum_network_v5: NetworkId = EthereumNetwork::get().into();
 	let weth_asset_location: Location =
 		Location::new(2, [ethereum_network_v5.into(), AccountKey20 { network: None, key: WETH }]);
@@ -623,8 +793,8 @@ fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u12
 }
 
 #[test]
-fn send_token_from_ethereum_to_existent_account_on_asset_hub() {
-	send_token_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE);
+fn send_weth_from_ethereum_to_existent_account_on_asset_hub() {
+	send_weth_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE);
 
 	AssetHubRococo::execute_with(|| {
 		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
@@ -640,8 +810,8 @@ fn send_token_from_ethereum_to_existent_account_on_asset_hub() {
 }
 
 #[test]
-fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() {
-	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE);
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub() {
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE);
 
 	AssetHubRococo::execute_with(|| {
 		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
@@ -657,8 +827,8 @@ fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() {
 }
 
 #[test]
-fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() {
-	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE);
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() {
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE);
 
 	AssetHubRococo::execute_with(|| {
 		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
@@ -675,10 +845,10 @@ fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficie
 }
 
 #[test]
-fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed(
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed(
 ) {
 	// On AH the xcm fee is 26_789_690 and the ED is 3_300_000
-	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], 30_000_000);
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], 30_000_000);
 
 	AssetHubRococo::execute_with(|| {
 		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
diff --git a/prdoc/pr_6855.prdoc b/prdoc/pr_6855.prdoc
new file mode 100644
index 00000000000..a665115ce6c
--- /dev/null
+++ b/prdoc/pr_6855.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Snowbridge - Support bridging native ETH
+
+doc:
+  - audience: Runtime User
+    description: 
+      Support Native ETH as an asset type instead of only supporting WETH. WETH is still supported, but this adds
+      support for ETH in the inbound and outbound routers.
+
+crates:
+  - name: snowbridge-router-primitives
+    bump: minor
+  - name: snowbridge-pallet-inbound-queue-fixtures
+    bump: minor
-- 
GitLab


From cdf107de700388a52a17b2fb852c98420c78278e Mon Sep 17 00:00:00 2001
From: wmjae <wenmujia@gmail.com>
Date: Thu, 9 Jan 2025 19:51:38 +0800
Subject: [PATCH 035/116] fix typo (#7096)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Dónal Murray <donalm@seadanda.dev>
---
 .../node/core/approval-voting/src/persisted_entries.rs    | 2 +-
 polkadot/node/core/pvf-checker/src/interest_view.rs       | 2 +-
 polkadot/node/network/approval-distribution/src/tests.rs  | 8 ++++----
 .../src/node/approval/approval-voting.md                  | 2 +-
 polkadot/runtime/rococo/src/xcm_config.rs                 | 2 +-
 substrate/client/allocator/src/freeing_bump.rs            | 2 +-
 substrate/client/api/src/proof_provider.rs                | 2 +-
 substrate/frame/preimage/src/lib.rs                       | 2 +-
 substrate/frame/recovery/README.md                        | 2 +-
 substrate/frame/recovery/src/lib.rs                       | 2 +-
 substrate/frame/support/src/dispatch_context.rs           | 2 +-
 substrate/primitives/api/src/lib.rs                       | 2 +-
 substrate/primitives/runtime/src/traits/mod.rs            | 2 +-
 13 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs
index d891af01c3a..a5d42d9fd6e 100644
--- a/polkadot/node/core/approval-voting/src/persisted_entries.rs
+++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs
@@ -561,7 +561,7 @@ impl BlockEntry {
 		self.distributed_assignments.resize(new_len, false);
 		self.distributed_assignments |= bitfield;
 
-		// If the an operation did not change our current bitfield, we return true.
+		// If an operation did not change our current bitfield, we return true.
 		let distributed = total_one_bits == self.distributed_assignments.count_ones();
 
 		distributed
diff --git a/polkadot/node/core/pvf-checker/src/interest_view.rs b/polkadot/node/core/pvf-checker/src/interest_view.rs
index 05a6f12de5d..617d0e0b5d8 100644
--- a/polkadot/node/core/pvf-checker/src/interest_view.rs
+++ b/polkadot/node/core/pvf-checker/src/interest_view.rs
@@ -58,7 +58,7 @@ impl PvfData {
 		Self { judgement: None, seen_in }
 	}
 
-	/// Mark a the `PvfData` as seen in the provided relay-chain block referenced by `relay_hash`.
+	/// Mark the `PvfData` as seen in the provided relay-chain block referenced by `relay_hash`.
 	pub fn seen_in(&mut self, relay_hash: Hash) {
 		self.seen_in.insert(relay_hash);
 	}
diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs
index 323b2cb08fe..5d79260e3ad 100644
--- a/polkadot/node/network/approval-distribution/src/tests.rs
+++ b/polkadot/node/network/approval-distribution/src/tests.rs
@@ -1255,7 +1255,7 @@ fn import_approval_happy_path_v1_v2_peers() {
 				}
 			);
 
-			// send the an approval from peer_b
+			// send an approval from peer_b
 			let approval = IndirectSignedApprovalVoteV2 {
 				block_hash: hash,
 				candidate_indices: candidate_index.into(),
@@ -1385,7 +1385,7 @@ fn import_approval_happy_path_v2() {
 				}
 			);
 
-			// send the an approval from peer_b
+			// send an approval from peer_b
 			let approval = IndirectSignedApprovalVoteV2 {
 				block_hash: hash,
 				candidate_indices,
@@ -1893,7 +1893,7 @@ fn import_approval_bad() {
 				.unwrap()
 				.unwrap();
 
-			// send the an approval from peer_b, we don't have an assignment yet
+			// send an approval from peer_b, we don't have an assignment yet
 			let approval = IndirectSignedApprovalVoteV2 {
 				block_hash: hash,
 				candidate_indices: candidate_index.into(),
@@ -4172,7 +4172,7 @@ fn import_versioned_approval() {
 				}
 			);
 
-			// send the an approval from peer_a
+			// send an approval from peer_a
 			let approval = IndirectSignedApprovalVote {
 				block_hash: hash,
 				candidate_index,
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
index 40394412d81..7e155cdf7d5 100644
--- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md
@@ -406,7 +406,7 @@ Some(core_index), response_sender)`
     * Construct a `IndirectSignedApprovalVote` using the information about the vote.
     * Dispatch `ApprovalDistributionMessage::DistributeApproval`.
   * ELSE
-    * Re-arm the timer with latest tick we have the send a the vote.
+    * Re-arm the timer with latest tick we have then send the vote.
 
 ### Determining Approval of Candidate
 
diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs
index 82a3136cc0d..bb77ec0000e 100644
--- a/polkadot/runtime/rococo/src/xcm_config.rs
+++ b/polkadot/runtime/rococo/src/xcm_config.rs
@@ -84,7 +84,7 @@ pub type LocalAssetTransactor = FungibleAdapter<
 	LocalCheckAccount,
 >;
 
-/// The means that we convert an the XCM message origin location into a local dispatch origin.
+/// The means that we convert the XCM message origin location into a local dispatch origin.
 type LocalOriginConverter = (
 	// A `Signed` origin of the sovereign account that the original location controls.
 	SovereignSignedViaLocation<LocationConverter, RuntimeOrigin>,
diff --git a/substrate/client/allocator/src/freeing_bump.rs b/substrate/client/allocator/src/freeing_bump.rs
index 144c0764540..405916adc3c 100644
--- a/substrate/client/allocator/src/freeing_bump.rs
+++ b/substrate/client/allocator/src/freeing_bump.rs
@@ -182,7 +182,7 @@ const NIL_MARKER: u32 = u32::MAX;
 enum Link {
 	/// Nil, denotes that there is no next element.
 	Nil,
-	/// Link to the next element represented as a pointer to the a header.
+	/// Link to the next element represented as a pointer to the header.
 	Ptr(u32),
 }
 
diff --git a/substrate/client/api/src/proof_provider.rs b/substrate/client/api/src/proof_provider.rs
index 7f60f856ae8..9043d348272 100644
--- a/substrate/client/api/src/proof_provider.rs
+++ b/substrate/client/api/src/proof_provider.rs
@@ -82,7 +82,7 @@ pub trait ProofProvider<Block: BlockT> {
 	) -> sp_blockchain::Result<Vec<(KeyValueStorageLevel, bool)>>;
 
 	/// Verify read storage proof for a set of keys.
-	/// Returns collected key-value pairs and a the nested state
+	/// Returns collected key-value pairs and the nested state
 	/// depth of current iteration or 0 if completed.
 	fn verify_range_proof(
 		&self,
diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs
index 658e7fec534..849ffddf4fb 100644
--- a/substrate/frame/preimage/src/lib.rs
+++ b/substrate/frame/preimage/src/lib.rs
@@ -236,7 +236,7 @@ pub mod pallet {
 			Self::do_unrequest_preimage(&hash)
 		}
 
-		/// Ensure that the a bulk of pre-images is upgraded.
+		/// Ensure that the bulk of pre-images is upgraded.
 		///
 		/// The caller pays no fee if at least 90% of pre-images were successfully updated.
 		#[pallet::call_index(4)]
diff --git a/substrate/frame/recovery/README.md b/substrate/frame/recovery/README.md
index fdaef5784fd..39f69140704 100644
--- a/substrate/frame/recovery/README.md
+++ b/substrate/frame/recovery/README.md
@@ -62,7 +62,7 @@ The intended life cycle of a successful recovery takes the following steps:
 
 ### Malicious Recovery Attempts
 
-Initializing a the recovery process for a recoverable account is open and
+Initializing the recovery process for a recoverable account is open and
 permissionless. However, the recovery deposit is an economic deterrent that
 should disincentivize would-be attackers from trying to maliciously recover
 accounts.
diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs
index 5a97b03cd23..42fb641983f 100644
--- a/substrate/frame/recovery/src/lib.rs
+++ b/substrate/frame/recovery/src/lib.rs
@@ -75,7 +75,7 @@
 //!
 //! ### Malicious Recovery Attempts
 //!
-//! Initializing a the recovery process for a recoverable account is open and
+//! Initializing the recovery process for a recoverable account is open and
 //! permissionless. However, the recovery deposit is an economic deterrent that
 //! should disincentivize would-be attackers from trying to maliciously recover
 //! accounts.
diff --git a/substrate/frame/support/src/dispatch_context.rs b/substrate/frame/support/src/dispatch_context.rs
index b34c6bdada3..42776e71cb8 100644
--- a/substrate/frame/support/src/dispatch_context.rs
+++ b/substrate/frame/support/src/dispatch_context.rs
@@ -140,7 +140,7 @@ impl<T> Value<'_, T> {
 
 /// Runs the given `callback` in the dispatch context and gives access to some user defined value.
 ///
-/// Passes the a mutable reference of [`Value`] to the callback. The value will be of type `T` and
+/// Passes a mutable reference of [`Value`] to the callback. The value will be of type `T` and
 /// is identified using the [`TypeId`] of `T`. This means that `T` should be some unique type to
 /// make the value unique. If no value is set yet [`Value::get()`] and [`Value::get_mut()`] will
 /// return `None`. It is totally valid to have some `T` that is shared between different callers to
diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs
index b412d4b52fe..8909d2b2e48 100644
--- a/substrate/primitives/api/src/lib.rs
+++ b/substrate/primitives/api/src/lib.rs
@@ -666,7 +666,7 @@ pub struct CallApiAtParams<'a, Block: BlockT> {
 	pub extensions: &'a RefCell<Extensions>,
 }
 
-/// Something that can call into the an api at a given block.
+/// Something that can call into an api at a given block.
 #[cfg(feature = "std")]
 pub trait CallApiAt<Block: BlockT> {
 	/// The state backend that is used to store the block states.
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index 5b6cacc7e00..8f5b484e4e3 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -1963,7 +1963,7 @@ pub trait AccountIdConversion<AccountId>: Sized {
 		Self::try_from_sub_account::<()>(a).map(|x| x.0)
 	}
 
-	/// Convert this value amalgamated with the a secondary "sub" value into an account ID,
+	/// Convert this value amalgamated with a secondary "sub" value into an account ID,
 	/// truncating any unused bytes. This is infallible.
 	///
 	/// NOTE: The account IDs from this and from `into_account` are *not* guaranteed to be distinct
-- 
GitLab


From 2f179585229880a596ab3b8b04a4be6c7db15efa Mon Sep 17 00:00:00 2001
From: seemantaggarwal <32275622+seemantaggarwal@users.noreply.github.com>
Date: Thu, 9 Jan 2025 20:18:59 +0530
Subject: [PATCH 036/116] Migrating salary pallet to use umbrella crate (#7048)

# Description

Migrating salary pallet to use umbrella crate. It is a follow-up from
https://github.com/paritytech/polkadot-sdk/pull/7025
Why did I create this new branch?
I created it so that the unnecessary `cargo fmt` changes from the previous
branch are discarded; hence this new PR.



## Review Notes

This PR migrates pallet-salary to use the umbrella crate.

Added change: as requested, an explanation of why `TestExternalities` was
replaced by `TestState` — the `testing_prelude` already re-exports it as
`pub use sp_io::TestExternalities as TestState;`


I have also modified the `defensive!` macro to be compatible with the umbrella
crate, since it is used in the salary pallet.
---
 Cargo.lock                                    |  8 +-----
 prdoc/pr_7048.prdoc                           | 17 ++++++++++++
 substrate/frame/salary/Cargo.toml             | 26 +++---------------
 substrate/frame/salary/src/benchmarking.rs    |  7 ++---
 substrate/frame/salary/src/lib.rs             | 27 +++++--------------
 .../frame/salary/src/tests/integration.rs     | 25 +++++------------
 substrate/frame/salary/src/tests/unit.rs      | 24 ++++++-----------
 substrate/frame/salary/src/weights.rs         |  2 +-
 substrate/frame/src/lib.rs                    | 23 +++++++++-------
 9 files changed, 60 insertions(+), 99 deletions(-)
 create mode 100644 prdoc/pr_7048.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 0a22179eb3d..4e2272bdc98 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -15064,17 +15064,11 @@ dependencies = [
 name = "pallet-salary"
 version = "13.0.0"
 dependencies = [
- "frame-benchmarking 28.0.0",
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "log",
  "pallet-ranked-collective 28.0.0",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "scale-info",
- "sp-arithmetic 23.0.0",
- "sp-core 28.0.0",
- "sp-io 30.0.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]
diff --git a/prdoc/pr_7048.prdoc b/prdoc/pr_7048.prdoc
new file mode 100644
index 00000000000..0f3856bc128
--- /dev/null
+++ b/prdoc/pr_7048.prdoc
@@ -0,0 +1,17 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[pallet-salary] Migrate to using frame umbrella crate'
+
+doc:
+  - audience: Runtime Dev
+    description: >
+      This PR migrates the `pallet-salary` to use the FRAME umbrella crate.  
+      This is part of the ongoing effort to migrate all pallets to use the FRAME umbrella crate.  
+      The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504).
+
+crates:
+  - name: pallet-salary
+    bump: minor
+  - name: polkadot-sdk-frame
+    bump: minor
diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml
index b3ed95bf1de..626993a0547 100644
--- a/substrate/frame/salary/Cargo.toml
+++ b/substrate/frame/salary/Cargo.toml
@@ -17,43 +17,25 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame-benchmarking = { optional = true, workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["experimental", "runtime"] }
 log = { workspace = true }
 pallet-ranked-collective = { optional = true, workspace = true }
 scale-info = { features = ["derive"], workspace = true }
-sp-arithmetic = { workspace = true }
-sp-core = { workspace = true }
-sp-io = { workspace = true }
-sp-runtime = { workspace = true }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-benchmarking?/std",
-	"frame-support/experimental",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"log/std",
 	"pallet-ranked-collective/std",
 	"scale-info/std",
-	"sp-arithmetic/std",
-	"sp-core/std",
-	"sp-io/std",
-	"sp-runtime/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
+	"frame/runtime-benchmarks",
 	"pallet-ranked-collective/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
+	"frame/try-runtime",
 	"pallet-ranked-collective?/try-runtime",
-	"sp-runtime/try-runtime",
 ]
diff --git a/substrate/frame/salary/src/benchmarking.rs b/substrate/frame/salary/src/benchmarking.rs
index aeae8d2d67f..6dfd6f6dd48 100644
--- a/substrate/frame/salary/src/benchmarking.rs
+++ b/substrate/frame/salary/src/benchmarking.rs
@@ -22,10 +22,7 @@
 use super::*;
 use crate::Pallet as Salary;
 
-use frame_benchmarking::v2::*;
-use frame_system::{Pallet as System, RawOrigin};
-use sp_core::Get;
-
+use frame::benchmarking::prelude::*;
 const SEED: u32 = 0;
 
 fn ensure_member_with_salary<T: Config<I>, I: 'static>(who: &T::AccountId) {
@@ -37,7 +34,7 @@ fn ensure_member_with_salary<T: Config<I>, I: 'static>(who: &T::AccountId) {
 	for _ in 0..255 {
 		let r = T::Members::rank_of(who).expect("prior guard ensures `who` is a member; qed");
 		if !T::Salary::get_salary(r, &who).is_zero() {
-			break
+			break;
 		}
 		T::Members::promote(who).unwrap();
 	}
diff --git a/substrate/frame/salary/src/lib.rs b/substrate/frame/salary/src/lib.rs
index efb4f5d3c54..6a843625f4a 100644
--- a/substrate/frame/salary/src/lib.rs
+++ b/substrate/frame/salary/src/lib.rs
@@ -19,20 +19,10 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use codec::{Decode, Encode, MaxEncodedLen};
 use core::marker::PhantomData;
-use scale_info::TypeInfo;
-use sp_arithmetic::traits::{Saturating, Zero};
-use sp_runtime::{Perbill, RuntimeDebug};
-
-use frame_support::{
-	defensive,
-	dispatch::DispatchResultWithPostInfo,
-	ensure,
-	traits::{
-		tokens::{GetSalary, Pay, PaymentStatus},
-		RankedMembers, RankedMembersSwapHandler,
-	},
+use frame::{
+	prelude::*,
+	traits::tokens::{GetSalary, Pay, PaymentStatus},
 };
 
 #[cfg(test)]
@@ -85,12 +75,9 @@ pub struct ClaimantStatus<CycleIndex, Balance, Id> {
 	status: ClaimState<Balance, Id>,
 }
 
-#[frame_support::pallet]
+#[frame::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::{dispatch::Pays, pallet_prelude::*};
-	use frame_system::pallet_prelude::*;
-
 	#[pallet::pallet]
 	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
 
@@ -460,15 +447,15 @@ impl<T: Config<I>, I: 'static>
 	) {
 		if who == new_who {
 			defensive!("Should not try to swap with self");
-			return
+			return;
 		}
 		if Claimant::<T, I>::contains_key(new_who) {
 			defensive!("Should not try to overwrite existing claimant");
-			return
+			return;
 		}
 
 		let Some(claimant) = Claimant::<T, I>::take(who) else {
-			frame_support::defensive!("Claimant should exist when swapping");
+			defensive!("Claimant should exist when swapping");
 			return;
 		};
 
diff --git a/substrate/frame/salary/src/tests/integration.rs b/substrate/frame/salary/src/tests/integration.rs
index 0c1fb8bbdcb..e4e9c8f6a31 100644
--- a/substrate/frame/salary/src/tests/integration.rs
+++ b/substrate/frame/salary/src/tests/integration.rs
@@ -19,25 +19,14 @@
 
 use crate as pallet_salary;
 use crate::*;
-use frame_support::{
-	assert_noop, assert_ok, derive_impl, hypothetically,
-	pallet_prelude::Weight,
-	parameter_types,
-	traits::{ConstU64, EitherOf, MapSuccess, NoOpPoll},
-};
+use frame::{deps::sp_io, testing_prelude::*};
 use pallet_ranked_collective::{EnsureRanked, Geometric};
-use sp_core::{ConstU16, Get};
-use sp_runtime::{
-	traits::{Convert, ReduceBy, ReplaceWithDefault},
-	BuildStorage,
-};
 
 type Rank = u16;
 type Block = frame_system::mocking::MockBlock<Test>;
 
-frame_support::construct_runtime!(
-	pub enum Test
-	{
+construct_runtime!(
+	pub struct Test {
 		System: frame_system,
 		Salary: pallet_salary,
 		Club: pallet_ranked_collective,
@@ -145,9 +134,9 @@ impl pallet_ranked_collective::Config for Test {
 	type BenchmarkSetup = Salary;
 }
 
-pub fn new_test_ext() -> sp_io::TestExternalities {
+pub fn new_test_ext() -> TestState {
 	let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	let mut ext = sp_io::TestExternalities::new(t);
+	let mut ext = TestState::new(t);
 	ext.execute_with(|| System::set_block_number(1));
 	ext
 }
@@ -194,7 +183,7 @@ fn swap_exhaustive_works() {
 
 			// The events mess up the storage root:
 			System::reset_events();
-			sp_io::storage::root(sp_runtime::StateVersion::V1)
+			sp_io::storage::root(StateVersion::V1)
 		});
 
 		let root_swap = hypothetically!({
@@ -207,7 +196,7 @@ fn swap_exhaustive_works() {
 
 			// The events mess up the storage root:
 			System::reset_events();
-			sp_io::storage::root(sp_runtime::StateVersion::V1)
+			sp_io::storage::root(StateVersion::V1)
 		});
 
 		assert_eq!(root_add, root_swap);
diff --git a/substrate/frame/salary/src/tests/unit.rs b/substrate/frame/salary/src/tests/unit.rs
index db1c8b947ef..3bb7bc4adf1 100644
--- a/substrate/frame/salary/src/tests/unit.rs
+++ b/substrate/frame/salary/src/tests/unit.rs
@@ -17,23 +17,15 @@
 
 //! The crate's tests.
 
-use std::collections::BTreeMap;
-
-use core::cell::RefCell;
-use frame_support::{
-	assert_noop, assert_ok, derive_impl,
-	pallet_prelude::Weight,
-	parameter_types,
-	traits::{tokens::ConvertRank, ConstU64},
-};
-use sp_runtime::{traits::Identity, BuildStorage, DispatchResult};
-
 use crate as pallet_salary;
 use crate::*;
+use core::cell::RefCell;
+use frame::{deps::sp_runtime::traits::Identity, testing_prelude::*, traits::tokens::ConvertRank};
+use std::collections::BTreeMap;
 
-type Block = frame_system::mocking::MockBlock<Test>;
+type Block = MockBlock<Test>;
 
-frame_support::construct_runtime!(
+construct_runtime!(
 	pub enum Test
 	{
 		System: frame_system,
@@ -124,7 +116,7 @@ impl RankedMembers for TestClub {
 	}
 	fn demote(who: &Self::AccountId) -> DispatchResult {
 		CLUB.with(|club| match club.borrow().get(who) {
-			None => Err(sp_runtime::DispatchError::Unavailable),
+			None => Err(DispatchError::Unavailable),
 			Some(&0) => {
 				club.borrow_mut().remove(&who);
 				Ok(())
@@ -156,9 +148,9 @@ impl Config for Test {
 	type Budget = Budget;
 }
 
-pub fn new_test_ext() -> sp_io::TestExternalities {
+pub fn new_test_ext() -> TestState {
 	let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	let mut ext = sp_io::TestExternalities::new(t);
+	let mut ext = TestState::new(t);
 	ext.execute_with(|| System::set_block_number(1));
 	ext
 }
diff --git a/substrate/frame/salary/src/weights.rs b/substrate/frame/salary/src/weights.rs
index f1cdaaa225a..43c001b30d3 100644
--- a/substrate/frame/salary/src/weights.rs
+++ b/substrate/frame/salary/src/weights.rs
@@ -46,8 +46,8 @@
 #![allow(unused_imports)]
 #![allow(missing_docs)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use core::marker::PhantomData;
+use frame::weights_prelude::*;
 
 /// Weight functions needed for `pallet_salary`.
 pub trait WeightInfo {
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index 15601ebde1f..23d22683be2 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -203,8 +203,12 @@ pub mod prelude {
 	/// Dispatch types from `frame-support`, other fundamental traits
 	#[doc(no_inline)]
 	pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo};
-	pub use frame_support::traits::{
-		Contains, EstimateNextSessionRotation, IsSubType, OnRuntimeUpgrade, OneSessionHandler,
+	pub use frame_support::{
+		defensive, defensive_assert,
+		traits::{
+			Contains, EitherOf, EstimateNextSessionRotation, IsSubType, MapSuccess, NoOpPoll,
+			OnRuntimeUpgrade, OneSessionHandler, RankedMembers, RankedMembersSwapHandler,
+		},
 	};
 
 	/// Pallet prelude of `frame-system`.
@@ -228,11 +232,10 @@ pub mod prelude {
 	/// Runtime traits
 	#[doc(no_inline)]
 	pub use sp_runtime::traits::{
-		BlockNumberProvider, Bounded, DispatchInfoOf, Dispatchable, SaturatedConversion,
-		Saturating, StaticLookup, TrailingZeroInput,
+		BlockNumberProvider, Bounded, Convert, DispatchInfoOf, Dispatchable, ReduceBy,
+		ReplaceWithDefault, SaturatedConversion, Saturating, StaticLookup, TrailingZeroInput,
 	};
-
-	/// Other runtime types and traits
+	/// Other error/result types for runtime
 	#[doc(no_inline)]
 	pub use sp_runtime::{
 		BoundToRuntimeAppPublic, DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError,
@@ -262,7 +265,7 @@ pub mod benchmarking {
 		pub use frame_benchmarking::benchmarking::*;
 		// The system origin, which is very often needed in benchmarking code. Might be tricky only
 		// if the pallet defines its own `#[pallet::origin]` and call it `RawOrigin`.
-		pub use frame_system::RawOrigin;
+		pub use frame_system::{Pallet as System, RawOrigin};
 	}
 
 	#[deprecated(
@@ -319,7 +322,7 @@ pub mod testing_prelude {
 	/// Other helper macros from `frame_support` that help with asserting in tests.
 	pub use frame_support::{
 		assert_err, assert_err_ignore_postinfo, assert_error_encoded_size, assert_noop, assert_ok,
-		assert_storage_noop, storage_alias,
+		assert_storage_noop, hypothetically, storage_alias,
 	};
 
 	pub use frame_system::{self, mocking::*};
@@ -330,7 +333,7 @@ pub mod testing_prelude {
 	pub use sp_io::TestExternalities as TestState;
 
 	/// Commonly used runtime traits for testing.
-	pub use sp_runtime::traits::BadOrigin;
+	pub use sp_runtime::{traits::BadOrigin, StateVersion};
 }
 
 /// All of the types and tools needed to build FRAME-based runtimes.
@@ -508,7 +511,7 @@ pub mod runtime {
 	#[cfg(feature = "std")]
 	pub mod testing_prelude {
 		pub use sp_core::storage::Storage;
-		pub use sp_runtime::BuildStorage;
+		pub use sp_runtime::{BuildStorage, DispatchError};
 	}
 }
 
-- 
GitLab


From 6bfe4523acf597ef47dfdcefd11b0eee396bc5c5 Mon Sep 17 00:00:00 2001
From: Andrei Eres <eresav@me.com>
Date: Thu, 9 Jan 2025 19:20:07 +0100
Subject: [PATCH 037/116] networking-bench: Update benchmarks payload (#7056)

# Description

- Used 10 notifications and requests within the benchmarks. After moving
the network workers' initialization out of the benchmarks, it is
acceptable to use this small number without losing precision.
- Removed the 128MB payload that consumed most of the execution time.
---
 .github/workflows/benchmarks-networking.yml   |   2 +
 .../network/benches/notifications_protocol.rs |  99 ++++++++---------
 .../benches/request_response_protocol.rs      | 102 ++++++++++--------
 3 files changed, 103 insertions(+), 100 deletions(-)

diff --git a/.github/workflows/benchmarks-networking.yml b/.github/workflows/benchmarks-networking.yml
index 79494b9a015..8f4246c7954 100644
--- a/.github/workflows/benchmarks-networking.yml
+++ b/.github/workflows/benchmarks-networking.yml
@@ -92,6 +92,7 @@ jobs:
         uses: benchmark-action/github-action-benchmark@v1
         with:
           tool: "cargo"
+          name: ${{ env.BENCH }}
           output-file-path: ./charts/${{ env.BENCH }}.txt
           benchmark-data-dir-path: ./bench/${{ env.BENCH }}
           github-token: ${{ steps.app-token.outputs.token }}
@@ -103,6 +104,7 @@ jobs:
         uses: benchmark-action/github-action-benchmark@v1
         with:
           tool: "cargo"
+          name: ${{ env.BENCH }}
           output-file-path: ./charts/${{ env.BENCH }}.txt
           benchmark-data-dir-path: ./bench/${{ env.BENCH }}
           github-token: ${{ steps.app-token.outputs.token }}
diff --git a/substrate/client/network/benches/notifications_protocol.rs b/substrate/client/network/benches/notifications_protocol.rs
index 40a810d616b..a406e328d5a 100644
--- a/substrate/client/network/benches/notifications_protocol.rs
+++ b/substrate/client/network/benches/notifications_protocol.rs
@@ -36,19 +36,16 @@ use std::{sync::Arc, time::Duration};
 use substrate_test_runtime_client::runtime;
 use tokio::{sync::Mutex, task::JoinHandle};
 
-const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[
-	// (Exponent of size, number of notifications, label)
-	(6, 100, "64B"),
-	(9, 100, "512B"),
-	(12, 100, "4KB"),
-	(15, 100, "64KB"),
-];
-const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[
-	// (Exponent of size, number of notifications, label)
-	(18, 10, "256KB"),
-	(21, 10, "2MB"),
-	(24, 10, "16MB"),
-	(27, 10, "128MB"),
+const NUMBER_OF_NOTIFICATIONS: usize = 100;
+const PAYLOAD: &[(u32, &'static str)] = &[
+	// (Exponent of size, label)
+	(6, "64B"),
+	(9, "512B"),
+	(12, "4KB"),
+	(15, "64KB"),
+	(18, "256KB"),
+	(21, "2MB"),
+	(24, "16MB"),
 ];
 const MAX_SIZE: u64 = 2u64.pow(30);
 
@@ -156,12 +153,19 @@ where
 				tokio::select! {
 					Some(event) = notification_service1.next_event() => {
 						if let NotificationEvent::NotificationStreamOpened { .. } = event {
-							break;
+							// Send a 32MB notification to preheat the network
+							notification_service1.send_async_notification(&peer_id2, vec![0; 2usize.pow(25)]).await.unwrap();
 						}
 					},
 					Some(event) = notification_service2.next_event() => {
-						if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event {
-							result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap();
+						match event {
+							NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
+								result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap();
+							},
+							NotificationEvent::NotificationReceived { .. } => {
+								break;
+							}
+							_ => {}
 						}
 					},
 				}
@@ -255,64 +259,53 @@ async fn run_with_backpressure(setup: Arc<BenchSetup>, size: usize, limit: usize
 	let _ = tokio::join!(network1, network2);
 }
 
-fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) {
+fn run_benchmark(c: &mut Criterion) {
 	let rt = tokio::runtime::Runtime::new().unwrap();
 	let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
-	let mut group = c.benchmark_group(group);
+	let mut group = c.benchmark_group("notifications_protocol");
 	group.plot_config(plot_config);
+	group.sample_size(10);
 
 	let libp2p_setup = setup_workers::<runtime::Block, runtime::Hash, NetworkWorker<_, _>>(&rt);
-	for &(exponent, limit, label) in payload.iter() {
+	for &(exponent, label) in PAYLOAD.iter() {
 		let size = 2usize.pow(exponent);
-		group.throughput(Throughput::Bytes(limit as u64 * size as u64));
-		group.bench_with_input(
-			BenchmarkId::new("libp2p/serially", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit));
-			},
-		);
+		group.throughput(Throughput::Bytes(NUMBER_OF_NOTIFICATIONS as u64 * size as u64));
+		group.bench_with_input(BenchmarkId::new("libp2p/serially", label), &size, |b, &size| {
+			b.to_async(&rt)
+				.iter(|| run_serially(Arc::clone(&libp2p_setup), size, NUMBER_OF_NOTIFICATIONS));
+		});
 		group.bench_with_input(
 			BenchmarkId::new("libp2p/with_backpressure", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt)
-					.iter(|| run_with_backpressure(Arc::clone(&libp2p_setup), size, limit));
+			&size,
+			|b, &size| {
+				b.to_async(&rt).iter(|| {
+					run_with_backpressure(Arc::clone(&libp2p_setup), size, NUMBER_OF_NOTIFICATIONS)
+				});
 			},
 		);
 	}
 	drop(libp2p_setup);
 
 	let litep2p_setup = setup_workers::<runtime::Block, runtime::Hash, Litep2pNetworkBackend>(&rt);
-	for &(exponent, limit, label) in payload.iter() {
+	for &(exponent, label) in PAYLOAD.iter() {
 		let size = 2usize.pow(exponent);
-		group.throughput(Throughput::Bytes(limit as u64 * size as u64));
-		group.bench_with_input(
-			BenchmarkId::new("litep2p/serially", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit));
-			},
-		);
+		group.throughput(Throughput::Bytes(NUMBER_OF_NOTIFICATIONS as u64 * size as u64));
+		group.bench_with_input(BenchmarkId::new("litep2p/serially", label), &size, |b, &size| {
+			b.to_async(&rt)
+				.iter(|| run_serially(Arc::clone(&litep2p_setup), size, NUMBER_OF_NOTIFICATIONS));
+		});
 		group.bench_with_input(
 			BenchmarkId::new("litep2p/with_backpressure", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt)
-					.iter(|| run_with_backpressure(Arc::clone(&litep2p_setup), size, limit));
+			&size,
+			|b, &size| {
+				b.to_async(&rt).iter(|| {
+					run_with_backpressure(Arc::clone(&litep2p_setup), size, NUMBER_OF_NOTIFICATIONS)
+				});
 			},
 		);
 	}
 	drop(litep2p_setup);
 }
 
-fn run_benchmark_with_small_payload(c: &mut Criterion) {
-	run_benchmark(c, SMALL_PAYLOAD, "notifications_protocol/small_payload");
-}
-
-fn run_benchmark_with_large_payload(c: &mut Criterion) {
-	run_benchmark(c, LARGE_PAYLOAD, "notifications_protocol/large_payload");
-}
-
-criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload);
+criterion_group!(benches, run_benchmark);
 criterion_main!(benches);
diff --git a/substrate/client/network/benches/request_response_protocol.rs b/substrate/client/network/benches/request_response_protocol.rs
index 85381112b75..97c6d72ddf1 100644
--- a/substrate/client/network/benches/request_response_protocol.rs
+++ b/substrate/client/network/benches/request_response_protocol.rs
@@ -37,19 +37,16 @@ use substrate_test_runtime_client::runtime;
 use tokio::{sync::Mutex, task::JoinHandle};
 
 const MAX_SIZE: u64 = 2u64.pow(30);
-const SMALL_PAYLOAD: &[(u32, usize, &'static str)] = &[
-	// (Exponent of size, number of requests, label)
-	(6, 100, "64B"),
-	(9, 100, "512B"),
-	(12, 100, "4KB"),
-	(15, 100, "64KB"),
-];
-const LARGE_PAYLOAD: &[(u32, usize, &'static str)] = &[
-	// (Exponent of size, number of requests, label)
-	(18, 10, "256KB"),
-	(21, 10, "2MB"),
-	(24, 10, "16MB"),
-	(27, 10, "128MB"),
+const NUMBER_OF_REQUESTS: usize = 100;
+const PAYLOAD: &[(u32, &'static str)] = &[
+	// (Exponent of size, label)
+	(6, "64B"),
+	(9, "512B"),
+	(12, "4KB"),
+	(15, "64KB"),
+	(18, "256KB"),
+	(21, "2MB"),
+	(24, "16MB"),
 ];
 
 pub fn create_network_worker<B, H, N>() -> (
@@ -154,6 +151,21 @@ where
 	let handle1 = tokio::spawn(worker1.run());
 	let handle2 = tokio::spawn(worker2.run());
 
+	let _ = tokio::spawn({
+		let rx2 = rx2.clone();
+
+		async move {
+			let req = rx2.recv().await.unwrap();
+			req.pending_response
+				.send(OutgoingResponse {
+					result: Ok(vec![0; 2usize.pow(25)]),
+					reputation_changes: vec![],
+					sent_feedback: None,
+				})
+				.unwrap();
+		}
+	});
+
 	let ready = tokio::spawn({
 		let network_service1 = Arc::clone(&network_service1);
 
@@ -165,6 +177,16 @@ where
 				network_service2.listen_addresses()[0].clone()
 			};
 			network_service1.add_known_address(peer_id2, listen_address2.into());
+			let _ = network_service1
+				.request(
+					peer_id2.into(),
+					"/request-response/1".into(),
+					vec![0; 2],
+					None,
+					IfDisconnected::TryConnect,
+				)
+				.await
+				.unwrap();
 		}
 	});
 
@@ -210,8 +232,8 @@ async fn run_serially(setup: Arc<BenchSetup>, size: usize, limit: usize) {
 		async move {
 			loop {
 				tokio::select! {
-					res = rx2.recv() => {
-						let IncomingRequest { pending_response, .. } = res.unwrap();
+					req = rx2.recv() => {
+						let IncomingRequest { pending_response, .. } = req.unwrap();
 						pending_response.send(OutgoingResponse {
 							result: Ok(vec![0; size]),
 							reputation_changes: vec![],
@@ -269,49 +291,35 @@ async fn run_with_backpressure(setup: Arc<BenchSetup>, size: usize, limit: usize
 	let _ = tokio::join!(network1, network2);
 }
 
-fn run_benchmark(c: &mut Criterion, payload: &[(u32, usize, &'static str)], group: &str) {
+fn run_benchmark(c: &mut Criterion) {
 	let rt = tokio::runtime::Runtime::new().unwrap();
 	let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
-	let mut group = c.benchmark_group(group);
+	let mut group = c.benchmark_group("request_response_protocol");
 	group.plot_config(plot_config);
+	group.sample_size(10);
 
 	let libp2p_setup = setup_workers::<runtime::Block, runtime::Hash, NetworkWorker<_, _>>(&rt);
-	for &(exponent, limit, label) in payload.iter() {
+	for &(exponent, label) in PAYLOAD.iter() {
 		let size = 2usize.pow(exponent);
-		group.throughput(Throughput::Bytes(limit as u64 * size as u64));
-		group.bench_with_input(
-			BenchmarkId::new("libp2p/serially", label),
-			&(size, limit),
-			|b, &(size, limit)| {
-				b.to_async(&rt).iter(|| run_serially(Arc::clone(&libp2p_setup), size, limit));
-			},
-		);
+		group.throughput(Throughput::Bytes(NUMBER_OF_REQUESTS as u64 * size as u64));
+		group.bench_with_input(BenchmarkId::new("libp2p/serially", label), &size, |b, &size| {
+			b.to_async(&rt)
+				.iter(|| run_serially(Arc::clone(&libp2p_setup), size, NUMBER_OF_REQUESTS));
+		});
 	}
 	drop(libp2p_setup);
 
-	// TODO: NetworkRequest::request should be implemented for Litep2pNetworkService
 	let litep2p_setup = setup_workers::<runtime::Block, runtime::Hash, Litep2pNetworkBackend>(&rt);
-	// for &(exponent, limit, label) in payload.iter() {
-	// 	let size = 2usize.pow(exponent);
-	// 	group.throughput(Throughput::Bytes(limit as u64 * size as u64));
-	// 	group.bench_with_input(
-	// 		BenchmarkId::new("litep2p/serially", label),
-	// 		&(size, limit),
-	// 		|b, &(size, limit)| {
-	// 			b.to_async(&rt).iter(|| run_serially(Arc::clone(&litep2p_setup), size, limit));
-	// 		},
-	// 	);
-	// }
+	for &(exponent, label) in PAYLOAD.iter() {
+		let size = 2usize.pow(exponent);
+		group.throughput(Throughput::Bytes(NUMBER_OF_REQUESTS as u64 * size as u64));
+		group.bench_with_input(BenchmarkId::new("litep2p/serially", label), &size, |b, &size| {
+			b.to_async(&rt)
+				.iter(|| run_serially(Arc::clone(&litep2p_setup), size, NUMBER_OF_REQUESTS));
+		});
+	}
 	drop(litep2p_setup);
 }
 
-fn run_benchmark_with_small_payload(c: &mut Criterion) {
-	run_benchmark(c, SMALL_PAYLOAD, "request_response_benchmark/small_payload");
-}
-
-fn run_benchmark_with_large_payload(c: &mut Criterion) {
-	run_benchmark(c, LARGE_PAYLOAD, "request_response_benchmark/large_payload");
-}
-
-criterion_group!(benches, run_benchmark_with_small_payload, run_benchmark_with_large_payload);
+criterion_group!(benches, run_benchmark);
 criterion_main!(benches);
-- 
GitLab


From e051f3edd3d6a0699a9261c8f8985d2e8e95c276 Mon Sep 17 00:00:00 2001
From: Francisco Aguirre <franciscoaguirreperez@gmail.com>
Date: Thu, 9 Jan 2025 23:20:01 -0300
Subject: [PATCH 038/116] Add XCM benchmarks to collectives-westend (#6820)

Collectives-westend was using `FixedWeightBounds`, meaning the same
weight per instruction. Added proper benchmarks.

---------

Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: Branislav Kontur <bkontur@gmail.com>
---
 .../collectives-westend/src/weights/mod.rs    |   1 +
 .../src/weights/xcm/mod.rs                    | 273 ++++++++++++++
 .../xcm/pallet_xcm_benchmarks_fungible.rs     | 211 +++++++++++
 .../xcm/pallet_xcm_benchmarks_generic.rs      | 355 ++++++++++++++++++
 .../collectives-westend/src/xcm_config.rs     |  30 +-
 prdoc/pr_6820.prdoc                           |   8 +
 6 files changed, 864 insertions(+), 14 deletions(-)
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
 create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
 create mode 100644 prdoc/pr_6820.prdoc

diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
index a1663dc98a3..ce85d23b21c 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs
@@ -47,6 +47,7 @@ pub mod pallet_utility;
 pub mod pallet_xcm;
 pub mod paritydb_weights;
 pub mod rocksdb_weights;
+pub mod xcm;
 
 pub use block_weights::constants::BlockExecutionWeight;
 pub use extrinsic_weights::constants::ExtrinsicBaseWeight;
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs
new file mode 100644
index 00000000000..d73ce8c440f
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs
@@ -0,0 +1,273 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+mod pallet_xcm_benchmarks_fungible;
+mod pallet_xcm_benchmarks_generic;
+
+use crate::{xcm_config::MaxAssetsIntoHolding, Runtime};
+use alloc::vec::Vec;
+use frame_support::weights::Weight;
+use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight;
+use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric;
+use sp_runtime::BoundedVec;
+use xcm::{
+	latest::{prelude::*, AssetTransferFilter},
+	DoubleEncoded,
+};
+
+trait WeighAssets {
+	fn weigh_assets(&self, weight: Weight) -> Weight;
+}
+
+// Collectives only knows about WND.
+const MAX_ASSETS: u64 = 1;
+
+impl WeighAssets for AssetFilter {
+	fn weigh_assets(&self, weight: Weight) -> Weight {
+		match self {
+			Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64),
+			Self::Wild(asset) => match asset {
+				All => weight.saturating_mul(MAX_ASSETS),
+				AllOf { fun, .. } => match fun {
+					WildFungibility::Fungible => weight,
+					// Magic number 2 has to do with the fact that we could have up to 2 times
+					// MaxAssetsIntoHolding in the worst-case scenario.
+					WildFungibility::NonFungible =>
+						weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64),
+				},
+				AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)),
+				AllOfCounted { count, .. } => weight.saturating_mul(MAX_ASSETS.min(*count as u64)),
+			},
+		}
+	}
+}
+
+impl WeighAssets for Assets {
+	fn weigh_assets(&self, weight: Weight) -> Weight {
+		weight.saturating_mul(self.inner().iter().count() as u64)
+	}
+}
+
+pub struct CollectivesWestendXcmWeight<Call>(core::marker::PhantomData<Call>);
+impl<Call> XcmWeightInfo<Call> for CollectivesWestendXcmWeight<Call> {
+	fn withdraw_asset(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
+	}
+	fn reserve_asset_deposited(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
+	}
+	fn receive_teleported_asset(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
+	}
+	fn query_response(
+		_query_id: &u64,
+		_response: &Response,
+		_max_weight: &Weight,
+		_querier: &Option<Location>,
+	) -> Weight {
+		XcmGeneric::<Runtime>::query_response()
+	}
+	fn transfer_asset(assets: &Assets, _dest: &Location) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_asset())
+	}
+	fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset())
+	}
+	fn transact(
+		_origin_type: &OriginKind,
+		_fallback_max_weight: &Option<Weight>,
+		_call: &DoubleEncoded<Call>,
+	) -> Weight {
+		XcmGeneric::<Runtime>::transact()
+	}
+	fn hrmp_new_channel_open_request(
+		_sender: &u32,
+		_max_message_size: &u32,
+		_max_capacity: &u32,
+	) -> Weight {
+		// XCM Executor does not currently support HRMP channel operations
+		Weight::MAX
+	}
+	fn hrmp_channel_accepted(_recipient: &u32) -> Weight {
+		// XCM Executor does not currently support HRMP channel operations
+		Weight::MAX
+	}
+	fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight {
+		// XCM Executor does not currently support HRMP channel operations
+		Weight::MAX
+	}
+	fn clear_origin() -> Weight {
+		XcmGeneric::<Runtime>::clear_origin()
+	}
+	fn descend_origin(_who: &InteriorLocation) -> Weight {
+		XcmGeneric::<Runtime>::descend_origin()
+	}
+	fn report_error(_query_response_info: &QueryResponseInfo) -> Weight {
+		XcmGeneric::<Runtime>::report_error()
+	}
+
+	fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
+	}
+	fn deposit_reserve_asset(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_reserve_asset())
+	}
+	fn exchange_asset(_give: &AssetFilter, _receive: &Assets, _maximal: &bool) -> Weight {
+		Weight::MAX
+	}
+	fn initiate_reserve_withdraw(
+		assets: &AssetFilter,
+		_reserve: &Location,
+		_xcm: &Xcm<()>,
+	) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_reserve_withdraw())
+	}
+	fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_teleport())
+	}
+	fn initiate_transfer(
+		_dest: &Location,
+		remote_fees: &Option<AssetTransferFilter>,
+		_preserve_origin: &bool,
+		assets: &Vec<AssetTransferFilter>,
+		_xcm: &Xcm<()>,
+	) -> Weight {
+		let mut weight = if let Some(remote_fees) = remote_fees {
+			let fees = remote_fees.inner();
+			fees.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_transfer())
+		} else {
+			Weight::zero()
+		};
+		for asset_filter in assets {
+			let assets = asset_filter.inner();
+			let extra = assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_transfer());
+			weight = weight.saturating_add(extra);
+		}
+		weight
+	}
+	fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight {
+		XcmGeneric::<Runtime>::report_holding()
+	}
+	fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight {
+		XcmGeneric::<Runtime>::buy_execution()
+	}
+	fn pay_fees(_asset: &Asset) -> Weight {
+		XcmGeneric::<Runtime>::pay_fees()
+	}
+	fn refund_surplus() -> Weight {
+		XcmGeneric::<Runtime>::refund_surplus()
+	}
+	fn set_error_handler(_xcm: &Xcm<Call>) -> Weight {
+		XcmGeneric::<Runtime>::set_error_handler()
+	}
+	fn set_appendix(_xcm: &Xcm<Call>) -> Weight {
+		XcmGeneric::<Runtime>::set_appendix()
+	}
+	fn clear_error() -> Weight {
+		XcmGeneric::<Runtime>::clear_error()
+	}
+	fn set_hints(hints: &BoundedVec<Hint, HintNumVariants>) -> Weight {
+		let mut weight = Weight::zero();
+		for hint in hints {
+			match hint {
+				AssetClaimer { .. } => {
+					weight = weight.saturating_add(XcmGeneric::<Runtime>::asset_claimer());
+				},
+			}
+		}
+		weight
+	}
+	fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight {
+		XcmGeneric::<Runtime>::claim_asset()
+	}
+	fn trap(_code: &u64) -> Weight {
+		XcmGeneric::<Runtime>::trap()
+	}
+	fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight {
+		XcmGeneric::<Runtime>::subscribe_version()
+	}
+	fn unsubscribe_version() -> Weight {
+		XcmGeneric::<Runtime>::unsubscribe_version()
+	}
+	fn burn_asset(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmGeneric::<Runtime>::burn_asset())
+	}
+	fn expect_asset(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmGeneric::<Runtime>::expect_asset())
+	}
+	fn expect_origin(_origin: &Option<Location>) -> Weight {
+		XcmGeneric::<Runtime>::expect_origin()
+	}
+	fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight {
+		XcmGeneric::<Runtime>::expect_error()
+	}
+	fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight {
+		XcmGeneric::<Runtime>::expect_transact_status()
+	}
+	fn query_pallet(_module_name: &Vec<u8>, _response_info: &QueryResponseInfo) -> Weight {
+		XcmGeneric::<Runtime>::query_pallet()
+	}
+	fn expect_pallet(
+		_index: &u32,
+		_name: &Vec<u8>,
+		_module_name: &Vec<u8>,
+		_crate_major: &u32,
+		_min_crate_minor: &u32,
+	) -> Weight {
+		XcmGeneric::<Runtime>::expect_pallet()
+	}
+	fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight {
+		XcmGeneric::<Runtime>::report_transact_status()
+	}
+	fn clear_transact_status() -> Weight {
+		XcmGeneric::<Runtime>::clear_transact_status()
+	}
+	fn universal_origin(_: &Junction) -> Weight {
+		Weight::MAX
+	}
+	fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight {
+		Weight::MAX
+	}
+	fn lock_asset(_: &Asset, _: &Location) -> Weight {
+		Weight::MAX
+	}
+	fn unlock_asset(_: &Asset, _: &Location) -> Weight {
+		Weight::MAX
+	}
+	fn note_unlockable(_: &Asset, _: &Location) -> Weight {
+		Weight::MAX
+	}
+	fn request_unlock(_: &Asset, _: &Location) -> Weight {
+		Weight::MAX
+	}
+	fn set_fees_mode(_: &bool) -> Weight {
+		XcmGeneric::<Runtime>::set_fees_mode()
+	}
+	fn set_topic(_topic: &[u8; 32]) -> Weight {
+		XcmGeneric::<Runtime>::set_topic()
+	}
+	fn clear_topic() -> Weight {
+		XcmGeneric::<Runtime>::clear_topic()
+	}
+	fn alias_origin(_: &Location) -> Weight {
+		XcmGeneric::<Runtime>::alias_origin()
+	}
+	fn unpaid_execution(_: &WeightLimit, _: &Option<Location>) -> Weight {
+		XcmGeneric::<Runtime>::unpaid_execution()
+	}
+	fn execute_with_origin(_: &Option<InteriorLocation>, _: &Xcm<Call>) -> Weight {
+		XcmGeneric::<Runtime>::execute_with_origin()
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
new file mode 100644
index 00000000000..00826cbb8d7
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -0,0 +1,211 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("collectives-westend-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weights for `pallet_xcm_benchmarks::fungible`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo<T> {
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	pub fn withdraw_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `101`
+		//  Estimated: `3593`
+		// Minimum execution time: 30_401_000 picoseconds.
+		Weight::from_parts(30_813_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	pub fn transfer_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `153`
+		//  Estimated: `6196`
+		// Minimum execution time: 43_150_000 picoseconds.
+		Weight::from_parts(43_919_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(2))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn transfer_reserve_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `223`
+		//  Estimated: `6196`
+		// Minimum execution time: 67_808_000 picoseconds.
+		Weight::from_parts(69_114_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(8))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	// Storage: `Benchmark::Override` (r:0 w:0)
+	// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn reserve_asset_deposited() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn initiate_reserve_withdraw() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 29_312_000 picoseconds.
+		Weight::from_parts(30_347_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	pub fn receive_teleported_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_283_000 picoseconds.
+		Weight::from_parts(2_448_000, 0)
+	}
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	pub fn deposit_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `52`
+		//  Estimated: `3593`
+		// Minimum execution time: 23_556_000 picoseconds.
+		Weight::from_parts(24_419_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn deposit_reserve_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `122`
+		//  Estimated: `3593`
+		// Minimum execution time: 58_342_000 picoseconds.
+		Weight::from_parts(59_598_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn initiate_teleport() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 28_285_000 picoseconds.
+		Weight::from_parts(29_016_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn initiate_transfer() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `122`
+		//  Estimated: `3593`
+		// Minimum execution time: 65_211_000 picoseconds.
+		Weight::from_parts(67_200_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
new file mode 100644
index 00000000000..ae94edc3d73
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -0,0 +1,355 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_xcm_benchmarks::generic`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("collectives-westend-dev"), DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::generic
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weights for `pallet_xcm_benchmarks::generic`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo<T> {
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn report_holding() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 29_015_000 picoseconds.
+		Weight::from_parts(30_359_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	pub fn buy_execution() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 572_000 picoseconds.
+		Weight::from_parts(637_000, 0)
+	}
+	pub fn pay_fees() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 1_550_000 picoseconds.
+		Weight::from_parts(1_604_000, 0)
+	}
+	// Storage: `PolkadotXcm::Queries` (r:1 w:0)
+	// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn query_response() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `32`
+		//  Estimated: `3497`
+		// Minimum execution time: 7_354_000 picoseconds.
+		Weight::from_parts(7_808_000, 3497)
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	pub fn transact() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 6_716_000 picoseconds.
+		Weight::from_parts(7_067_000, 0)
+	}
+	pub fn refund_surplus() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 1_280_000 picoseconds.
+		Weight::from_parts(1_355_000, 0)
+	}
+	pub fn set_error_handler() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 587_000 picoseconds.
+		Weight::from_parts(645_000, 0)
+	}
+	pub fn set_appendix() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 629_000 picoseconds.
+		Weight::from_parts(662_000, 0)
+	}
+	pub fn clear_error() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 590_000 picoseconds.
+		Weight::from_parts(639_000, 0)
+	}
+	pub fn descend_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 651_000 picoseconds.
+		Weight::from_parts(688_000, 0)
+	}
+	pub fn clear_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 601_000 picoseconds.
+		Weight::from_parts(630_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn report_error() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 25_650_000 picoseconds.
+		Weight::from_parts(26_440_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1)
+	// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn claim_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `90`
+		//  Estimated: `3555`
+		// Minimum execution time: 10_492_000 picoseconds.
+		Weight::from_parts(10_875_000, 3555)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	pub fn trap() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 597_000 picoseconds.
+		Weight::from_parts(647_000, 0)
+	}
+	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn subscribe_version() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `38`
+		//  Estimated: `3503`
+		// Minimum execution time: 23_732_000 picoseconds.
+		Weight::from_parts(24_290_000, 3503)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(3))
+	}
+	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1)
+	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn unsubscribe_version() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_446_000 picoseconds.
+		Weight::from_parts(2_613_000, 0)
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	pub fn burn_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 960_000 picoseconds.
+		Weight::from_parts(1_045_000, 0)
+	}
+	pub fn expect_asset() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 703_000 picoseconds.
+		Weight::from_parts(739_000, 0)
+	}
+	pub fn expect_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 616_000 picoseconds.
+		Weight::from_parts(651_000, 0)
+	}
+	pub fn expect_error() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 621_000 picoseconds.
+		Weight::from_parts(660_000, 0)
+	}
+	pub fn expect_transact_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 794_000 picoseconds.
+		Weight::from_parts(831_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn query_pallet() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 29_527_000 picoseconds.
+		Weight::from_parts(30_614_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	pub fn expect_pallet() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 3_189_000 picoseconds.
+		Weight::from_parts(3_296_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn report_transact_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `70`
+		//  Estimated: `3535`
+		// Minimum execution time: 25_965_000 picoseconds.
+		Weight::from_parts(26_468_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	pub fn clear_transact_status() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 618_000 picoseconds.
+		Weight::from_parts(659_000, 0)
+	}
+	pub fn set_topic() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 593_000 picoseconds.
+		Weight::from_parts(618_000, 0)
+	}
+	pub fn clear_topic() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 603_000 picoseconds.
+		Weight::from_parts(634_000, 0)
+	}
+	pub fn alias_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 2_000_000 picoseconds.
+		Weight::from_parts(2_000_000, 0)
+	}
+	pub fn set_fees_mode() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 568_000 picoseconds.
+		Weight::from_parts(629_000, 0)
+	}
+	pub fn unpaid_execution() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 598_000 picoseconds.
+		Weight::from_parts(655_000, 0)
+	}
+	pub fn asset_claimer() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 707_000 picoseconds.
+		Weight::from_parts(749_000, 0)
+	}
+	pub fn execute_with_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 713_000 picoseconds.
+		Weight::from_parts(776_000, 0)
+	}
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
index 9eb9b85a391..c5ab21fe8f9 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
@@ -21,7 +21,6 @@ use super::{
 use frame_support::{
 	parameter_types,
 	traits::{tokens::imbalance::ResolveTo, ConstU32, Contains, Equals, Everything, Nothing},
-	weights::Weight,
 };
 use frame_system::EnsureRoot;
 use pallet_collator_selection::StakingPotAccountId;
@@ -39,12 +38,12 @@ use xcm_builder::{
 	AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain,
 	AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom,
 	DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily,
-	EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter,
-	HashedDescription, IsConcrete, LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser,
-	ParentIsPreset, RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative,
-	SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32,
-	SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents,
-	WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents,
+	EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete,
+	LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset,
+	RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia,
+	SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit,
+	TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic,
+	XcmFeeManagerFromComponents,
 };
 use xcm_executor::XcmExecutor;
 
@@ -125,11 +124,6 @@ pub type XcmOriginToTransactDispatchOrigin = (
 );
 
 parameter_types! {
-	/// The amount of weight an XCM operation takes. This is a safe overestimate.
-	pub const BaseXcmWeight: Weight = Weight::from_parts(1_000_000_000, 1024);
-	/// A temporary weight value for each XCM instruction.
-	/// NOTE: This should be removed after we account for PoV weights.
-	pub const TempFixedXcmWeight: Weight = Weight::from_parts(1_000_000_000, 0);
 	pub const MaxInstructions: u32 = 100;
 	pub const MaxAssetsIntoHolding: u32 = 64;
 	// Fellows pluralistic body.
@@ -208,7 +202,11 @@ impl xcm_executor::Config for XcmConfig {
 	type IsTeleporter = TrustedTeleporters;
 	type UniversalLocation = UniversalLocation;
 	type Barrier = Barrier;
-	type Weigher = FixedWeightBounds<TempFixedXcmWeight, RuntimeCall, MaxInstructions>;
+	type Weigher = WeightInfoBounds<
+		crate::weights::xcm::CollectivesWestendXcmWeight<RuntimeCall>,
+		RuntimeCall,
+		MaxInstructions,
+	>;
 	type Trader = UsingComponents<
 		WeightToFee,
 		WndLocation,
@@ -275,7 +273,11 @@ impl pallet_xcm::Config for Runtime {
 	type XcmExecutor = XcmExecutor<XcmConfig>;
 	type XcmTeleportFilter = Everything;
 	type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location.
-	type Weigher = FixedWeightBounds<BaseXcmWeight, RuntimeCall, MaxInstructions>;
+	type Weigher = WeightInfoBounds<
+		crate::weights::xcm::CollectivesWestendXcmWeight<RuntimeCall>,
+		RuntimeCall,
+		MaxInstructions,
+	>;
 	type UniversalLocation = UniversalLocation;
 	type RuntimeOrigin = RuntimeOrigin;
 	type RuntimeCall = RuntimeCall;
diff --git a/prdoc/pr_6820.prdoc b/prdoc/pr_6820.prdoc
new file mode 100644
index 00000000000..85249a33341
--- /dev/null
+++ b/prdoc/pr_6820.prdoc
@@ -0,0 +1,8 @@
+title: Add XCM benchmarks to collectives-westend
+doc:
+- audience: Runtime Dev
+  description: Collectives-westend was using `FixedWeightBounds`, meaning the same
+    weight per instruction. Added proper benchmarks.
+crates:
+- name: collectives-westend-runtime
+  bump: patch
-- 
GitLab


From 738282a2c4127f5e6a1c8d50235ba126b9f05025 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?= <git@kchr.de>
Date: Sat, 11 Jan 2025 11:32:50 +0100
Subject: [PATCH 039/116] Fix incorrect deprecated message (#7118)

---
 substrate/frame/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index 23d22683be2..f79a52bc6c5 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -327,7 +327,7 @@ pub mod testing_prelude {
 
 	pub use frame_system::{self, mocking::*};
 
-	#[deprecated(note = "Use `frame::testing_prelude::TestExternalities` instead.")]
+	#[deprecated(note = "Use `frame::testing_prelude::TestState` instead.")]
 	pub use sp_io::TestExternalities;
 
 	pub use sp_io::TestExternalities as TestState;
-- 
GitLab


From 7d8e3a434ea1e760190456e8df1359aa8137e16a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?= <git@kchr.de>
Date: Mon, 13 Jan 2025 13:32:01 +0100
Subject: [PATCH 040/116] reference-docs: Start `state` and mention well known
 keys (#7037)

Closes: https://github.com/paritytech/polkadot-sdk/issues/7033
---
 Cargo.lock                              |  1 +
 docs/sdk/Cargo.toml                     |  1 +
 docs/sdk/src/reference_docs/mod.rs      |  3 +++
 docs/sdk/src/reference_docs/state.rs    | 12 ++++++++++++
 substrate/primitives/storage/src/lib.rs |  4 ++++
 5 files changed, 21 insertions(+)
 create mode 100644 docs/sdk/src/reference_docs/state.rs

diff --git a/Cargo.lock b/Cargo.lock
index 4e2272bdc98..cfb805fbe84 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -19202,6 +19202,7 @@ dependencies = [
  "sp-runtime 31.0.1",
  "sp-runtime-interface 24.0.0",
  "sp-std 14.0.0",
+ "sp-storage 19.0.0",
  "sp-tracing 16.0.0",
  "sp-version 29.0.0",
  "sp-weights 27.0.0",
diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml
index f526c07796e..4d83e2045ab 100644
--- a/docs/sdk/Cargo.toml
+++ b/docs/sdk/Cargo.toml
@@ -110,6 +110,7 @@ sp-offchain = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 sp-runtime-interface = { workspace = true, default-features = true }
 sp-std = { workspace = true, default-features = true }
+sp-storage = { workspace = true, default-features = true }
 sp-tracing = { workspace = true, default-features = true }
 sp-version = { workspace = true, default-features = true }
 sp-weights = { workspace = true, default-features = true }
diff --git a/docs/sdk/src/reference_docs/mod.rs b/docs/sdk/src/reference_docs/mod.rs
index e47eece784c..7ad8a37241b 100644
--- a/docs/sdk/src/reference_docs/mod.rs
+++ b/docs/sdk/src/reference_docs/mod.rs
@@ -111,3 +111,6 @@ pub mod custom_runtime_api_rpc;
 
 /// The [`polkadot-omni-node`](https://crates.io/crates/polkadot-omni-node) and its related binaries.
 pub mod omni_node;
+
+/// Learn about the state in Substrate.
+pub mod state;
diff --git a/docs/sdk/src/reference_docs/state.rs b/docs/sdk/src/reference_docs/state.rs
new file mode 100644
index 00000000000..a8138caebf1
--- /dev/null
+++ b/docs/sdk/src/reference_docs/state.rs
@@ -0,0 +1,12 @@
+//! # State
+//!
+//! The state is abstracted as a key-value like database. Every item that
+//! needs to be persisted by the [State Transition
+//! Function](crate::reference_docs::blockchain_state_machines) is written to the state.
+//!
+//! ## Special keys
+//!
+//! The key-value pairs in the state are represented as byte sequences. The node
+//! doesn't know how to interpret most of the key-value pairs. However, there exist some
+//! special keys and their values that are known to the node, the so-called
+//! [`well-known-keys`](sp_storage::well_known_keys).
diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs
index 4b25f85fba6..df7570a1854 100644
--- a/substrate/primitives/storage/src/lib.rs
+++ b/substrate/primitives/storage/src/lib.rs
@@ -191,11 +191,15 @@ pub mod well_known_keys {
 	/// Wasm code of the runtime.
 	///
 	/// Stored as a raw byte vector. Required by substrate.
+	///
+	/// Encodes to `0x3A636F6465`.
 	pub const CODE: &[u8] = b":code";
 
 	/// Number of wasm linear memory pages required for execution of the runtime.
 	///
 	/// The type of this value is encoded `u64`.
+	///
+	/// Encodes to `0x3A686561707061676573`.
 	pub const HEAP_PAGES: &[u8] = b":heappages";
 
 	/// Current extrinsic index (u32) is stored under this key.
-- 
GitLab


From 2f7cf417136537d007d5302d1d08a8958f8a5c97 Mon Sep 17 00:00:00 2001
From: Branislav Kontur <bkontur@gmail.com>
Date: Mon, 13 Jan 2025 15:44:09 +0100
Subject: [PATCH 041/116] xcm: Fixes for `UnpaidLocalExporter` (#7126)

## Description

This PR deprecates `UnpaidLocalExporter` in favor of the new
`LocalExporter`. First, the name is misleading, as it can be used in
both paid and unpaid scenarios. Second, it contains a hard-coded channel
0, whereas `LocalExporter` uses the same algorithm as `xcm-exporter`.

## Future Improvements

Remove the `channel` argument and slightly modify the
`ExportXcm::validate` signature as part of [this
issue](https://github.com/orgs/paritytech/projects/145/views/8?pane=issue&itemId=84899273).

---------

Co-authored-by: command-bot <>
---
 polkadot/xcm/xcm-builder/src/lib.rs           |  6 +-
 .../src/tests/bridging/local_para_para.rs     |  2 +-
 .../src/tests/bridging/local_relay_relay.rs   |  2 +-
 .../xcm/xcm-builder/src/tests/bridging/mod.rs |  2 +-
 .../xcm/xcm-builder/src/universal_exports.rs  | 62 +++++++++++++++++--
 prdoc/pr_7126.prdoc                           |  7 +++
 6 files changed, 71 insertions(+), 10 deletions(-)
 create mode 100644 prdoc/pr_7126.prdoc

diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs
index 3d68d8ed16a..e23412a97eb 100644
--- a/polkadot/xcm/xcm-builder/src/lib.rs
+++ b/polkadot/xcm/xcm-builder/src/lib.rs
@@ -132,11 +132,13 @@ pub use routing::{
 mod transactional;
 pub use transactional::FrameTransactionalProcessor;
 
+#[allow(deprecated)]
+pub use universal_exports::UnpaidLocalExporter;
 mod universal_exports;
 pub use universal_exports::{
 	ensure_is_remote, BridgeBlobDispatcher, BridgeMessage, DispatchBlob, DispatchBlobError,
-	ExporterFor, HaulBlob, HaulBlobError, HaulBlobExporter, NetworkExportTable,
-	NetworkExportTableItem, SovereignPaidRemoteExporter, UnpaidLocalExporter, UnpaidRemoteExporter,
+	ExporterFor, HaulBlob, HaulBlobError, HaulBlobExporter, LocalExporter, NetworkExportTable,
+	NetworkExportTableItem, SovereignPaidRemoteExporter, UnpaidRemoteExporter,
 };
 
 mod weight;
diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs
index ea584bf9d48..5e930fe575c 100644
--- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs
@@ -28,7 +28,7 @@ parameter_types! {
 type TheBridge =
 	TestBridge<BridgeBlobDispatcher<TestRemoteIncomingRouter, RemoteUniversalLocation, ()>>;
 type Router = TestTopic<
-	UnpaidLocalExporter<
+	LocalExporter<
 		HaulBlobExporter<TheBridge, RemoteNetwork, AlwaysLatest, Price>,
 		UniversalLocation,
 	>,
diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs
index 38ffe2532d5..a41f0972181 100644
--- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs
@@ -28,7 +28,7 @@ parameter_types! {
 type TheBridge =
 	TestBridge<BridgeBlobDispatcher<TestRemoteIncomingRouter, RemoteUniversalLocation, ()>>;
 type Router = TestTopic<
-	UnpaidLocalExporter<
+	LocalExporter<
 		HaulBlobExporter<TheBridge, RemoteNetwork, AlwaysLatest, Price>,
 		UniversalLocation,
 	>,
diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs
index 767575e7f2d..90ad9921d65 100644
--- a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs
@@ -209,7 +209,7 @@ impl<Local: Get<Junctions>, Remote: Get<Junctions>, RemoteExporter: ExportXcm> S
 		let origin = Local::get().relative_to(&Remote::get());
 		AllowUnpaidFrom::set(vec![origin.clone()]);
 		set_exporter_override(price::<RemoteExporter>, deliver::<RemoteExporter>);
-		// The we execute it:
+		// Then we execute it:
 		let mut id = fake_id();
 		let outcome = XcmExecutor::<TestConfig>::prepare_and_execute(
 			origin,
diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs
index 6b3c3adf737..e215aea3ab6 100644
--- a/polkadot/xcm/xcm-builder/src/universal_exports.rs
+++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs
@@ -16,6 +16,8 @@
 
 //! Traits and utilities to help with origin mutation and bridging.
 
+#![allow(deprecated)]
+
 use crate::InspectMessageQueues;
 use alloc::{vec, vec::Vec};
 use codec::{Decode, Encode};
@@ -58,6 +60,8 @@ pub fn ensure_is_remote(
 /// that the message sending cannot be abused in any way.
 ///
 /// This is only useful when the local chain has bridging capabilities.
+#[deprecated(note = "Will be removed after July 2025; It uses hard-coded channel `0`, \
+	use `xcm_builder::LocalExporter` directly instead.")]
 pub struct UnpaidLocalExporter<Exporter, UniversalLocation>(
 	PhantomData<(Exporter, UniversalLocation)>,
 );
@@ -100,6 +104,54 @@ impl<Exporter: ExportXcm, UniversalLocation: Get<InteriorLocation>> SendXcm
 	fn ensure_successful_delivery(_: Option<Location>) {}
 }
 
+/// Implementation of `SendXcm` which uses the given `ExportXcm` implementation in order to forward
+/// the message over a bridge.
+///
+/// This is only useful when the local chain has bridging capabilities.
+pub struct LocalExporter<Exporter, UniversalLocation>(PhantomData<(Exporter, UniversalLocation)>);
+impl<Exporter: ExportXcm, UniversalLocation: Get<InteriorLocation>> SendXcm
+	for LocalExporter<Exporter, UniversalLocation>
+{
+	type Ticket = Exporter::Ticket;
+
+	fn validate(
+		dest: &mut Option<Location>,
+		msg: &mut Option<Xcm<()>>,
+	) -> SendResult<Exporter::Ticket> {
+		// This `clone` ensures that `dest` is not consumed in any case.
+		let d = dest.clone().take().ok_or(MissingArgument)?;
+		let universal_source = UniversalLocation::get();
+		let devolved = ensure_is_remote(universal_source.clone(), d).map_err(|_| NotApplicable)?;
+		let (remote_network, remote_location) = devolved;
+		let xcm = msg.take().ok_or(MissingArgument)?;
+
+		let hash =
+			(Some(Location::here()), &remote_location).using_encoded(sp_io::hashing::blake2_128);
+		let channel = u32::decode(&mut hash.as_ref()).unwrap_or(0);
+
+		validate_export::<Exporter>(
+			remote_network,
+			channel,
+			universal_source,
+			remote_location,
+			xcm.clone(),
+		)
+		.inspect_err(|err| {
+			if let NotApplicable = err {
+				// We need to make sure that msg is not consumed in case of `NotApplicable`.
+				*msg = Some(xcm);
+			}
+		})
+	}
+
+	fn deliver(ticket: Exporter::Ticket) -> Result<XcmHash, SendError> {
+		Exporter::deliver(ticket)
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn ensure_successful_delivery(_: Option<Location>) {}
+}
+
 pub trait ExporterFor {
 	/// Return the locally-routable bridge (if any) capable of forwarding `message` to the
 	/// `remote_location` on the remote `network`, together with the payment which is required.
@@ -703,9 +755,9 @@ mod tests {
 		let local_dest: Location = (Parent, Parachain(5678)).into();
 		assert!(ensure_is_remote(UniversalLocation::get(), local_dest.clone()).is_err());
 
-		// UnpaidLocalExporter
+		// LocalExporter
 		ensure_validate_does_not_consume_dest_or_msg::<
-			UnpaidLocalExporter<RoutableBridgeExporter, UniversalLocation>,
+			LocalExporter<RoutableBridgeExporter, UniversalLocation>,
 		>(local_dest.clone(), |result| assert_eq!(Err(NotApplicable), result));
 
 		// 2. check with not applicable from the inner router (using `NotApplicableBridgeSender`)
@@ -713,14 +765,14 @@ mod tests {
 			(Parent, Parent, DifferentRemote::get(), RemoteDestination::get()).into();
 		assert!(ensure_is_remote(UniversalLocation::get(), remote_dest.clone()).is_ok());
 
-		// UnpaidLocalExporter
+		// LocalExporter
 		ensure_validate_does_not_consume_dest_or_msg::<
-			UnpaidLocalExporter<NotApplicableBridgeExporter, UniversalLocation>,
+			LocalExporter<NotApplicableBridgeExporter, UniversalLocation>,
 		>(remote_dest.clone(), |result| assert_eq!(Err(NotApplicable), result));
 
 		// 3. Ok - deliver
 		// UnpaidRemoteExporter
-		assert_ok!(send_xcm::<UnpaidLocalExporter<RoutableBridgeExporter, UniversalLocation>>(
+		assert_ok!(send_xcm::<LocalExporter<RoutableBridgeExporter, UniversalLocation>>(
 			remote_dest,
 			Xcm::default()
 		));
diff --git a/prdoc/pr_7126.prdoc b/prdoc/pr_7126.prdoc
new file mode 100644
index 00000000000..1a86af1b2d1
--- /dev/null
+++ b/prdoc/pr_7126.prdoc
@@ -0,0 +1,7 @@
+title: 'xcm: Fixes for `UnpaidLocalExporter`'
+doc:
+- audience: Runtime Dev
+  description: This PR deprecates `UnpaidLocalExporter` in favor of the new `LocalExporter`. First, the name is misleading, as it can be used in both paid and unpaid scenarios. Second, it contains a hard-coded channel 0, whereas `LocalExporter` uses the same algorithm as `xcm-exporter`.
+crates:
+- name: staging-xcm-builder
+  bump: minor
-- 
GitLab


From ba572ae892d4e4fae89ca053d8a137117b0f3a17 Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Mon, 13 Jan 2025 15:49:37 +0100
Subject: [PATCH 042/116] [pallet-revive] Update gas encoding (#6689)

Update the current approach to attach the `ref_time`, `pov` and
`deposit` parameters to an Ethereum transaction.
Previously we will pass these 3 parameters along with the signed
payload, and check that the fees resulting from `gas x gas_price` match
the actual fees paid by the user for the extrinsic.

This approach unfortunately can be attacked. A malicious actor could
force such a transaction to fail by injecting low values for some of
these extra parameters as they are not part of the signed payload.

The new approach encodes these 3 extra parameters in the lower digits of
the transaction gas, approximating the log2 of the actual values to
encode each component on 2 digits.

---------

Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: command-bot <>
---
 .../assets/asset-hub-westend/src/lib.rs       |   1 +
 prdoc/pr_6689.prdoc                           |  19 ++
 substrate/bin/node/runtime/src/lib.rs         |   1 +
 .../frame/revive/rpc/examples/js/bun.lockb    | Bin 40649 -> 40649 bytes
 .../frame/revive/rpc/examples/js/package.json |   4 +-
 .../rpc/examples/js/src/geth-diff.test.ts     |  22 --
 .../revive/rpc/examples/js/src/piggy-bank.ts  |  15 +-
 .../frame/revive/rpc/revive_chain.metadata    | Bin 659977 -> 661594 bytes
 substrate/frame/revive/rpc/src/client.rs      |   4 +-
 substrate/frame/revive/rpc/src/lib.rs         |  23 +-
 substrate/frame/revive/src/evm.rs             |   2 +
 substrate/frame/revive/src/evm/api/byte.rs    |   5 +-
 substrate/frame/revive/src/evm/gas_encoder.rs | 174 +++++++++++++
 substrate/frame/revive/src/evm/runtime.rs     | 228 +++++++++---------
 substrate/frame/revive/src/lib.rs             |  31 ++-
 15 files changed, 340 insertions(+), 189 deletions(-)
 create mode 100644 prdoc/pr_6689.prdoc
 create mode 100644 substrate/frame/revive/src/evm/gas_encoder.rs

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 5fb495e4e8c..71cfdc58cce 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -981,6 +981,7 @@ impl pallet_revive::Config for Runtime {
 	type Xcm = pallet_xcm::Pallet<Self>;
 	type ChainId = ConstU64<420_420_421>;
 	type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12.
+	type EthGasEncoder = ();
 }
 
 impl TryFrom<RuntimeCall> for pallet_revive::Call<Runtime> {
diff --git a/prdoc/pr_6689.prdoc b/prdoc/pr_6689.prdoc
new file mode 100644
index 00000000000..2cbb49cd7dd
--- /dev/null
+++ b/prdoc/pr_6689.prdoc
@@ -0,0 +1,19 @@
+title: '[pallet-revive] Update gas encoding'
+doc:
+- audience: Runtime Dev
+  description: |-
+    Update the current approach to attach the `ref_time`, `pov` and `deposit` parameters to an Ethereum transaction.
+    Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic.
+
+    This approach unfortunately can be attacked. A malicious actor could force such a transaction to fail by injecting low values for some of these extra parameters as they are not part of the signed payload.
+
+    The new approach encodes these 3 extra parameters in the lower digits of the transaction gas, using the log2 of the actual values to encode each component on 2 digits.
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
+- name: pallet-revive
+  bump: minor
+- name: asset-hub-westend-runtime
+  bump: minor
+- name: pallet-revive-mock-network
+  bump: minor
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 93b134e8165..7de04b27ff8 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -1468,6 +1468,7 @@ impl pallet_revive::Config for Runtime {
 	type Xcm = ();
 	type ChainId = ConstU64<420_420_420>;
 	type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12.
+	type EthGasEncoder = ();
 }
 
 impl pallet_sudo::Config for Runtime {
diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb
index 46994bb147547bfb9b960b7630d1a6d274ee75dd..67df5841e43fba141c7a146a1e4a8958b4c7a84c 100755
GIT binary patch
delta 279
zcmV+y0qFk8y#mR-0+22soiH!oX)-nTSX|in0HhK|x0gz@V`@A~BLYiBe~%4#u}+#2
zlVl7ilMoXalcWwPlaLz;vsn|wBtT0(J!NEn1~X2p?UXB7Jvi1>R<9n);G^G0U~|Q5
zMB7D`Fj;o&?!D+#cbOwVUcVM=HzK{nVywxpEIqXw>`RkfI2b@*8u|njLIC6<#`-5a
zC$;wp^T;@qwKuAj(~g=w!jb0XqITSH4R$<SCAs_2cDiF<8mE~?o{3IGI1jtCmts+~
zwKyj*0X(zaXb%AeI{*LxJhSm>p8^RE0000m0000WvzcsD2a_Ol8k6jEG_yK%I~4&l
dlW~tAlTe%nlg@(=1Tio!GLvzDAG6Mh`xGRLa$o=e

delta 274
zcmV+t0qy?Dy#mR-0+22sm$qsC-QFrj08A7+@VAjr|M5l`Sq&6l^96BcM<AV~u}+#2
zlV}StlNb{alPC%>vse?wBtSbdFKlv(4w$;pImPzg)KO3c)E*{sr!?C>Pa^u})5WrH
z?2dCjw;3D6xm-<8UmEmHQmoo8gdP^y(@S_h*pic7I2b^vs4P<l$7NlX31YyVQ}rgP
z=@X(R>K)kA=h%S6+OX6+SMP!aeojwTtkMl1`0TE2v1*0>msHX$nBE8?5g-|}wKyj*
z0XwtZXb%AeIsgCwJG1d=p8^Rm0000W0000EvzcsD2a_Cg5|bEoB9rWM8nZfdI~9|J
YfgqDloClN6gAV~QlVOG;v(Ab86w_&Re*gdg

diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json
index 6d8d00fd421..0119f4f34a1 100644
--- a/substrate/frame/revive/rpc/examples/js/package.json
+++ b/substrate/frame/revive/rpc/examples/js/package.json
@@ -9,10 +9,10 @@
 		"preview": "vite preview"
 	},
 	"dependencies": {
+		"@parity/revive": "^0.0.5",
 		"ethers": "^6.13.4",
 		"solc": "^0.8.28",
-		"viem": "^2.21.47",
-		"@parity/revive": "^0.0.5"
+		"viem": "^2.21.47"
 	},
 	"devDependencies": {
 		"prettier": "^3.3.3",
diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
index b9ee877927b..871adeccbc9 100644
--- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
@@ -289,27 +289,5 @@ for (const env of envs) {
 				],
 			})
 		})
-
-		test.only('eth_estimate (no gas specified) child_call', async () => {
-			let balance = await env.serverWallet.getBalance(env.accountWallet.account)
-			expect(balance).toBe(0n)
-
-			const data = encodeFunctionData({
-				abi: FlipperCallerAbi,
-				functionName: 'callFlip',
-			})
-
-			await env.accountWallet.request({
-				method: 'eth_estimateGas',
-				params: [
-					{
-						data,
-						from: env.accountWallet.account.address,
-						to: flipperCallerAddr,
-						gas: `0x${Number(1000000).toString(16)}`,
-					},
-				],
-			})
-		})
 	})
 }
diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
index 0040b0c78dc..8289ac8b76e 100644
--- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
@@ -1,9 +1,9 @@
 import { assert, getByteCode, walletClient } from './lib.ts'
-import { abi } from '../abi/piggyBank.ts'
+import { PiggyBankAbi } from '../abi/piggyBank.ts'
 import { parseEther } from 'viem'
 
 const hash = await walletClient.deployContract({
-	abi,
+	abi: PiggyBankAbi,
 	bytecode: getByteCode('piggyBank'),
 })
 const deployReceipt = await walletClient.waitForTransactionReceipt({ hash })
@@ -16,7 +16,7 @@ assert(contractAddress, 'Contract address should be set')
 	const result = await walletClient.estimateContractGas({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'deposit',
 		value: parseEther('10'),
 	})
@@ -26,7 +26,7 @@ assert(contractAddress, 'Contract address should be set')
 	const { request } = await walletClient.simulateContract({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'deposit',
 		value: parseEther('10'),
 	})
@@ -36,9 +36,6 @@ assert(contractAddress, 'Contract address should be set')
 
 	const receipt = await walletClient.waitForTransactionReceipt({ hash })
 	console.log(`Deposit receipt: ${receipt.status}`)
-	if (process.env.STOP) {
-		process.exit(0)
-	}
 }
 
 // Withdraw 5 WST
@@ -46,7 +43,7 @@ assert(contractAddress, 'Contract address should be set')
 	const { request } = await walletClient.simulateContract({
 		account: walletClient.account,
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'withdraw',
 		args: [parseEther('5')],
 	})
@@ -58,7 +55,7 @@ assert(contractAddress, 'Contract address should be set')
 	// Check remaining balance
 	const balance = await walletClient.readContract({
 		address: contractAddress,
-		abi,
+		abi: PiggyBankAbi,
 		functionName: 'getDeposit',
 	})
 
diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata
index 64b1f2014dd06815fcea6a87bc96306eb00eda8b..402e8c2d22b21471929e9c61acd2cc968af614cf 100644
GIT binary patch
delta 4640
zcmaJ_dr%eE8Q*j7Ue4X?W7i0Zh{+N~A%Pn(V6jRvK5&AG&q$0l3@-N`VCCN3>+W72
z8bzj|;2(C9rsmW%w$a8`nsH(#V^=%csYXprCp9=4<6AUpe1O4;&DcEJ@7zVX*PE$>
z?Cv?=<M%zzcfOBx@8q1?k&_Z8mLh)An$nhncBZ)A*pd;M9?}E#bEDyEV$3r1@O(pW
zP=mynSJVFP7MK0*7I)Lhlme+b8jifaIeUct^vnDyAIhZ}?fkQkaK&g*9gz)1tCzz{
zvus*wC?r=Cc_lG)zf7;J;qy~uMGKZ8`j>V-8_D!+JO4PcsL;Vz4=qx|5#6w=!h~sO
z;@yLC9MLtL@_$9oP&B3|poBssC|5Uwl4e~avaYc=IiSOOz)~BC><E|hWmOA=qCr)w
z&9eZrNi?vMu5IUUxDcY7C%V(<-nIN7I=h|srS&seI#Z%*!DzrDL3?=z-*g)V>7|vP
z{V0?Eag|4IDUZQy)Q$R(t_0=Aeh1WoYJege#3V+8VyU{8Cs#);*=p2fQwb3{LJU<0
z=z3HODuzl-IUF@DIii^6ud7LoZV;K_46ql~l$C%L1!KWb(^P9U67+L>^5tc8REsCi
z{`V@+%}lWfm;$8AQAxbC>!Fkpbl6*-H227YZS+s8JYxpSYB)@Ss$!8)Go8EEGZ9Ur
z->mXHGYUed7#29EM2@HNSG@6tC#%KL877(~n{|3L1hB!eN|4A=4MH9R8fU3&DfPr$
zfMl7})Ib>ga=FE92$33afbQ7EB4@BKqJo`<tOSFw0aO}kbgb45a353MQtL!jXM{~M
zh&Ss^M-b4HCKd8dtBo=Fxk|K%N$K|jkbx8LhX+!TEocza<pAt~T~+G`w-~}12|<d1
zM}fYD0H`KQK@SAa^vf|_dIK>U4b_4evgErDXuxe|za#(_3#bBqU`fcFF6%Xm*auS)
zpBnx=vr&mug@|I`_>CuH&~P4QMwDijW+f=|1rnGVEorZK#{>UU?232r1UG%}&6HWW
zpdm<rO_R{z=#vvUG_#u2DGjO~HL3up390Dp*k~I*mztV$cj)&+k?0WxahRAUjuNxP
zT)S@%-ZPxKj^On1C5=R_t+ODn0wF~W7cGu`E_A+leh~-%@<p087p2m_e~KSPh4d0U
zMih&vgrO{JvJ6!-)xc96SJ+SYL>GQhC>A-+RVZ?k>8o9MHY%m3yKwG;X(FFKFTxHC
z$VR0i@=9~zWP=jvc|A;ct`yi1$CUxyLSjZkmKW$=;mc4t(0b?9^V};8(%>?~;JI><
zqo<GJ3H0Tocs!az|8NvPI(mVKo~h94HN8<=1hvbAvjysy&^h*nqnJl%0fpyS>OF?%
zq6)hB7|upZ=;~wmvBD)HFHc`&5Hd%Jc==~hE6=}xN;saY1X!_OE=A<vhvU7id<jSB
z=g08E;T0k(DAyo;;S9}GEQRA#rs7drbsSGB2q)OGLX0<N1(Q2q4MVxYVZC}B??6$S
zaRO)2-JjvY{N@BPtC^VAdp=nmC#DtN5!FxNadgTFJPEZrGG0G{OHmu`I)QW1I{NJi
zoIQShLNMBrgV7cfm30!2q?MoJLudmXcM|_AcVhzIrewZNjF00s(b1>yRJ56{IgKBt
zA6>-5(N=o;B7O<IPv5(MOU11s;umr#oj^IY%$>CREFR<EnqX%qv$L*0^qp~L*2PqG
zGT9|)JGD>aEO?weji-S0n8GvNxV&Iz0@mHhSa&;E*~2xT+v~7?d<Mkqi^F{|0r$Zl
z!+kIT_u+Wlhm&w0c4W^!hyAFNHlM@!{!bG4yW;t~lK8tE{+ZYC2<knLXQm!c;J%gu
zUOP#boyV!(la7HW9ga88<LA*?+It?4&gd4AyfiwgXo_4^$#LC4=jI_B3%JX4UpLOA
zPhY?fp=*xieX)q4JGAEl9*3Mz!5+|!rf;3b8K}nzK@aWdhJf_3ouL0tBC2?st|i%Y
z*O8`Oghc&*0D%ih^vp%P6lAibad{-#djXH_$G~<AyKx2uSB|7NF5y9d>YGb=tluNW
zmk=by<q?twPMwoS0zG#Hr_*I$;6gNn{^|=nJ!6!Fa#)3%-VCMxDGtejIz`p0)L<6v
z{Q{53$d=Glr<6^d77W6T#E1shDrOda_%a?nDoa8I{reRYizmTms;WhzRxVw58Bai2
zbme78*)00jW&GIeA0gIsEkKgjy_Wu?n$wN3R&#{pQ|SsGIU|XW8v^basJz3Zq{=%C
zoC1fIJzQxLmv-MHL+PtmaG5ZQZH$q)F>K?>+^ht7XOj=sStswZ=})iX2@7%)$j2nB
z2gyu*ega)#GF_oVSIAI|p+~M_sjwJ$1}=-qEYhXG7`rVd1NprITk5b*j%7?H&HEC+
zm<hlo79I{sL8#pK?_RjhO6l8Q;-ybaOORd`pGReJ!ks)S>(53wn2XZN6F6gMa*jlm
z^LV&0$AJQ$vE+qQIfCB3hR4$#*P(9DxQ@ecGrfNuF3u(NyX$yj?vez*R3`hS(veun
zxW>|zH*j#NlE6cfd5FWq9=ta_ol_GS!pRI_haoJ{?nCZWI_D-{>{`+P@z<NU04|st
zH}P}uDCxo3{%C>&iXbk{_u^7C5SC^~Xfyq)7pKzS_uy?Us}mRt;u#B)7+VvF+gO0^
zt7!w-_rl+=hb{5mb<%4Rw+>!i>m_cz<eh0os={HTDq^T%mHxLEd+0xVq0+s^P&GZi
zAwfq+yo!zoG|*eOa2nlq3;$&JCMW^VLt%jaV}6}tLYuRR-oAzPlFbR?wlc`u?|Eix
z9OUir4u^s*xjH4TlWzP9GGjZ%eR%Pd_d&?OiQbtUt(^`xd$@K>9Q=UcC7lA?avS?_
z9$avT`tXE72ibnMmpd$ZDca&mwFlqE-wr{~?euz|7Fw3w@9+{@LlQ(>j+f&y--a$3
zdhl4=2z`01i-e)g>8%O0%l-YKpUXl{L#=AC8IDa&vDv3VQA0{K3{3;$28?geII}Lg
zWk3SFkwBC+bAAvthmB4VP^g+P?f@xt-w#MONEn*;AS}zvG~FUqN+c3eNwDfZsAYa`
z1(KiBYt?`p)Y+I}LKiAWbePqGsu@wN*@3zN<JhDK>YZ6G8i9hr#yC)HTDqas!iO#^
z4Z0eXgHg~Ffc-r1Nk3crEWvBf5(M=Fly2|XBUF0m!~2D7ltX9k7dnR}w%)m72kqG}
z%tQIK^h2QpO{NVW3Liil{n!CvCYomJ2ZZg&T_~2(M-B-`+!bOm?K>na_vMSRI#wYP
zN<I>LQdQAwH5H0E)$9)eRgi>rMPa+<AHocm7!}9Ch++{o`5nZ~_O6eGKfBy*;yU}Q
zZeg^Gu00|YqxJR=-wAIbdw!RYF&Ium{aK-f-yre_Xu&yQEFE@E$bhym@0{=|+DL(P
zmhC<-Tz9!QIbyc+(Ek1IiqI+Ub%2#fFdthb`)K10VK#mHsxS?GLN8twevXdYHD3x*
z1kK5rYeF}=9AlI2#<R6TUjTed-GWq1kKYhh!GOEqrtk_Xw?DfntU=yFaWET=7SNY^
zg;DpuD(uz0LWc_}G^J0t4Bh|DK4B%QuxoD%6)vRGy<ZEzh?za>j<DR7zCxN>PnxS>
zd?2hG=z0yfM3hTu`5ht8{`wo?x2}}fN@Z+?x(>L<(SnN*+n)ajlFRYfa5ukR^p;Wd
zy^y*g*Y`^l-LT6y39a66%eNdl@}D8!p+W1U)R_3!qz(4tE?);qSoxdVw+n$!8$G`1
zJk*O_6s7pa&{H1YSa9hN9^YcrX}^%-YvZBcJ>&CjMtkiwgM57m?Xx38d{u6A(Ed~c
Jo$kXD_dmxEok9Qr

delta 3431
zcmZu!3s6+o8NTQ4UH&~QdM^cZ@sWUvLPyAA0AU0J=yoJFD#l0E(#@*Zu*$Ny4<bIh
zQ>oG!GI28I7aePHWI8l)M#r&Noj55;X(3azI->-OiA1S@g5s-MqtkP_Y}wn}nT2!r
zod5gIcfNDa|DU~nGNtx-N|Z+qxWu+lR815)6UBdW>|;w^?B4Uoc;);tQ=pT~Qr_4h
zJh75eJhD$%G%0a;nVjfzRcuc5xT<sf0e5+M;yO8To9y%A;#%RZvi&#02M@`6_-Kdu
zOJ3TFfB&U99=|wjwxGD%JX;CvGT)o5{BXoPZ48OV#3pkbZduCD`b06u?9FU4v%e@M
zvk8(-#D;Z8qa1kjGl)r_<*IZ$t7M<w?X9RMBs!Rp&5;G`<T6*4+Z*)#&ydpk8GIWz
ze){uqFUJwlEK)H>j29EcB$0bQ4)=cv2}(yZ9F4`Pi%BfDUWKVbn#ftO<RQeqMpERh
z0iV0V?_TfE5J}ETd3~NE+%pePUIiP@xduO@86wB=43~umuff|S7jIpI)Rp<7U|Uid
zU^)e1Ims1?MOy5wkU5T%-t~H9fy-qu!f}NxZkg;4mIo{>Zjay~4i;-!vRUBvuwX9Q
z;qwWcLko7`z&9`z_jJGvQjDK=z*O=MUhIGbbalWqw0;BMvkn%sg_NMP6Ozdatm%Xw
zv*u(c*lepA4X6I&OGS=-IKdJUayS>>?1W{pB_gpGSCq*<xgxN@6>xE!tj+iSb;!26
zbwWL2q*9N{tC#A*_UmwkY{B3C1D4r?I&ifL-WgtIbp&`PduFCotEeQL-v!y2)WueO
zxC`=02s^tVmDHf=1|+A}>I@DU4Gw8sby#u(uH(`h5RV1l!uiw#I$*sKSg!%=RbU*>
z`3_RZ5q$4E$RG_^@ICyToWO!!$QK(#A}r&GO|D{w`A^~8n=mE4LATOIb){t^``H+=
z(6ZrmHmbC*;qjX=ku>2KHzA2N-8W$_n}UtJj(Od%(0)oM_>58T8BMU-@JqRK8tIj8
zMsq>a-K^8y{Qq<}>vXq7>TWUUZqcUb>V*mDz6BFVBZh84T6&v~q&<?P-9XZ=k^FQJ
z#-nr_7T7QA(0x(Nx~t)rq%Ugp4ZEr>_o{}!tK!ezhWt65q9w1==k~Y*?kd?CsIHX#
z(u&zx+3YpvZipmtZ7@4?ZenIB$G&l#Nnnh2u3N;*gD?><-GNxrqxm#2yubXAh`W1X
z8qs!^?-_|4-mN}uxBBp-e?ksFFfuf^8;ai;(k-JW(u5v^2~S3lRd*nte;%pjWuz8D
za91x(ACZEm@4#ea?oVZknelP?l54{TUojAAVMex{y;@RaQh6CE;iW->8JG4!F`;;<
z4~j@U-soeQVv0oUTKdXUEu6<FH~PGuBpllhwk+f5^!HtUCo_M&Y#fVoS5yWA$+)H;
zW==B>=XqRPof~D@St<LRjAjZR?`J7%3SR1m3FD3NmVh@2f9QuvYg0!>7F7piCNi$d
zRUR}-;3zX>G#ihV4B2?BG<+b{hL4fB7}b**c<?SbXuR5&AaMz5pABE#g-NMXbbOu0
zBiYGpW{E5bi|)b9l4M<Yf^m9AuZE}U;?j(9X<A&GCgsXKh_`1j%xD40lGv%qWx(MI
zlEuQqc{x{uW?^$LOv2d%u!eME-2lrxxmwp(15l8juVX7z*~00oFoH}=Uxk=42wuBG
zhZ#P%#bE`J=eAf=P^?YI&TSqmR?Y{ka4otI9<^er&t3OnCaYJ%1DKt<Lg&Y7qaUj^
z%GD}#@0wCwxXT#s(!$k7u-I&ztOK}>0JjEkW5E!V<6A>4)$DFH+4+!&r&>*mSV<~)
z2+8SNbPi-P(}H0GGDmkHsF4Nn<A?D1?rI&z9*MCVFgta@A$42+J41-khBtX+3bjkH
z)JSy_SHm9pT8XQbEDQXVPLIc@9#fASFFs<mv<{y=g01QMb@clp+4mX958!VfLvd_9
zJACiTe)T4{w9MsaH=BC=_hazp9MO?As9KxCZZt$_ZDP;tomac~Mu}_0h9~STXhQKR
zteSg*k&GVLQ^uV+rJ>b^KO=GM!&aQrL|OT3cnY&v<-7P4X3}PLJlVpvD6gJE%s8@8
zk<6C2=-z{(ML8(aveRU;LcXG<=9rYiO83g}&C<eFjlXE6$z(e2X{Aj#uZ=DtsrX?V
z%^_L%*EV{Z9h>c6(*-17`P0|*1Tm$Fg;@DFddXBGW?)e}-DFJ_r48&FRU*3ZkL`3I
zP8KbJt!ZM)x^nOO%}(Z7S*BZ2F4FnDxJ8`q54ZxdhutJvAgIvG^bfo#B-SW}y>t?f
z2`1~Crg&1TEHPOR5v8GnPK;)o_xUZlTi7oOf5y;lIt|y|rW472+<BXRL=GSe{fV-?
zhxYTPdX4CWz%DZP@6jgloF*+t^1I3dITx^Xh`x<O1GJE|;hTf>SLCvy4ALNBDI)Cw
z?IzvBFzHDoEJU8GxV0v;lz}rI((Qb{gqeeM2fqG@0&yraAJJXJk|su%%N5c)c;Ycl
z2!AV4E<UFFc=lsr)xT&DO9T1O=r&TKoP9=D@I=OOFX*p_mya*#Cf??e@;1xWPVWa5
ztVC4$yxxt>8Zj{+551tX6vr!in2#FnDb;#@M67wd=2omps1+>^jOn3qxGl>1A-j@N
zux68LECA~!vJ*cA>tDvyNO8lwb<%!ihsC;&=z2t}^(<kQwvMqDF$>2;Tj#T8Nwjqp
qX;gk6ZLJa5g)J?{+Ca`Jmtw3#gj`UXW35gTX;zXZu*pm<689g-;Mqz5

diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs
index 901c15e9756..de97844eccb 100644
--- a/substrate/frame/revive/rpc/src/client.rs
+++ b/substrate/frame/revive/rpc/src/client.rs
@@ -17,7 +17,7 @@
 //! The client connects to the source substrate chain
 //! and is used by the rpc server to query and send transactions to the substrate chain.
 use crate::{
-	runtime::GAS_PRICE,
+	runtime::gas_from_fee,
 	subxt_client::{
 		revive::{calls::types::EthTransact, events::ContractEmitted},
 		runtime_types::pallet_revive::storage::ContractInfo,
@@ -771,7 +771,7 @@ impl Client {
 	pub async fn evm_block(&self, block: Arc<SubstrateBlock>) -> Result<Block, ClientError> {
 		let runtime_api = self.inner.api.runtime_api().at(block.hash());
 		let max_fee = Self::weight_to_fee(&runtime_api, self.max_block_weight()).await?;
-		let gas_limit = U256::from(max_fee / GAS_PRICE as u128);
+		let gas_limit = gas_from_fee(max_fee);
 
 		let header = block.header();
 		let timestamp = extract_block_timestamp(&block).await.unwrap_or_default();
diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs
index ccd8bb043e9..230f2f8b7ef 100644
--- a/substrate/frame/revive/rpc/src/lib.rs
+++ b/substrate/frame/revive/rpc/src/lib.rs
@@ -148,31 +148,12 @@ impl EthRpcServer for EthRpcServerImpl {
 
 	async fn send_raw_transaction(&self, transaction: Bytes) -> RpcResult<H256> {
 		let hash = H256(keccak_256(&transaction.0));
-
-		let tx = TransactionSigned::decode(&transaction.0).map_err(|err| {
-			log::debug!(target: LOG_TARGET, "Failed to decode transaction: {err:?}");
-			EthRpcError::from(err)
-		})?;
-
-		let eth_addr = tx.recover_eth_address().map_err(|err| {
-			log::debug!(target: LOG_TARGET, "Failed to recover eth address: {err:?}");
-			EthRpcError::InvalidSignature
-		})?;
-
-		let tx = GenericTransaction::from_signed(tx, Some(eth_addr));
-
-		// Dry run the transaction to get the weight limit and storage deposit limit
-		let dry_run = self.client.dry_run(tx, BlockTag::Latest.into()).await?;
-
-		let call = subxt_client::tx().revive().eth_transact(
-			transaction.0,
-			dry_run.gas_required.into(),
-			dry_run.storage_deposit,
-		);
+		let call = subxt_client::tx().revive().eth_transact(transaction.0);
 		self.client.submit(call).await.map_err(|err| {
 			log::debug!(target: LOG_TARGET, "submit call failed: {err:?}");
 			err
 		})?;
+
 		log::debug!(target: LOG_TARGET, "send_raw_transaction hash: {hash:?}");
 		Ok(hash)
 	}
diff --git a/substrate/frame/revive/src/evm.rs b/substrate/frame/revive/src/evm.rs
index c3495fc0559..c8c967fbe09 100644
--- a/substrate/frame/revive/src/evm.rs
+++ b/substrate/frame/revive/src/evm.rs
@@ -19,4 +19,6 @@
 
 mod api;
 pub use api::*;
+mod gas_encoder;
+pub use gas_encoder::*;
 pub mod runtime;
diff --git a/substrate/frame/revive/src/evm/api/byte.rs b/substrate/frame/revive/src/evm/api/byte.rs
index df4ed1740ec..c2d64f8e5e4 100644
--- a/substrate/frame/revive/src/evm/api/byte.rs
+++ b/substrate/frame/revive/src/evm/api/byte.rs
@@ -116,7 +116,10 @@ macro_rules! impl_hex {
 
         impl Debug for $type {
             fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
-                write!(f, concat!(stringify!($type), "({})"), self.0.to_hex())
+				let hex_str = self.0.to_hex();
+				let truncated = &hex_str[..hex_str.len().min(100)];
+				let ellipsis = if hex_str.len() > 100 { "..." } else { "" };
+                write!(f, concat!(stringify!($type), "({}{})"), truncated,ellipsis)
             }
         }
 
diff --git a/substrate/frame/revive/src/evm/gas_encoder.rs b/substrate/frame/revive/src/evm/gas_encoder.rs
new file mode 100644
index 00000000000..ffdf8b13c04
--- /dev/null
+++ b/substrate/frame/revive/src/evm/gas_encoder.rs
@@ -0,0 +1,174 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//! Encodes/Decodes EVM gas values.
+
+use crate::Weight;
+use core::ops::{Div, Rem};
+use frame_support::pallet_prelude::CheckedShl;
+use sp_arithmetic::traits::{One, Zero};
+use sp_core::U256;
+
+// We use 3 digits to store each component.
+const SCALE: u128 = 100;
+
+/// Rounds up the given value to the nearest multiple of the mask.
+///
+/// # Panics
+/// Panics if the `mask` is zero.
+fn round_up<T>(value: T, mask: T) -> T
+where
+	T: One + Zero + Copy + Rem<Output = T> + Div<Output = T>,
+	<T as Rem>::Output: PartialEq,
+{
+	let rest = if value % mask == T::zero() { T::zero() } else { T::one() };
+	value / mask + rest
+}
+
+/// Rounds up the log2 of the given value to the nearest integer.
+fn log2_round_up<T>(val: T) -> u128
+where
+	T: Into<u128>,
+{
+	let val = val.into();
+	val.checked_ilog2()
+		.map(|v| if 1u128 << v == val { v } else { v + 1 })
+		.unwrap_or(0) as u128
+}
+
+mod private {
+	pub trait Sealed {}
+	impl Sealed for () {}
+}
+
+/// Encodes/Decodes EVM gas values.
+///
+/// # Note
+///
+/// This is defined as a trait rather than standalone functions to allow
+/// it to be added as an associated type to [`crate::Config`]. This way,
+/// it can be invoked without requiring the implementation bounds to be
+/// explicitly specified.
+///
+/// This trait is sealed and cannot be implemented by downstream crates.
+pub trait GasEncoder<Balance>: private::Sealed {
+	/// Encodes all components (deposit limit, weight reference time, and proof size) into a single
+	/// gas value.
+	fn encode(gas_limit: U256, weight: Weight, deposit: Balance) -> U256;
+
+	/// Decodes the weight and deposit from the encoded gas value.
+	/// Returns `None` if the gas value is invalid
+	fn decode(gas: U256) -> Option<(Weight, Balance)>;
+}
+
+impl<Balance> GasEncoder<Balance> for ()
+where
+	Balance: Zero + One + CheckedShl + Into<u128>,
+{
+	/// The encoding follows the pattern `g...grrppdd`, where:
+	/// - `dd`: log2 Deposit value, encoded in the lowest 2 digits.
+	/// - `pp`: log2 Proof size, encoded in the next 2 digits.
+	/// - `rr`: log2 Reference time, encoded in the next 2 digits.
+	/// - `g...g`: Gas limit, encoded in the highest digits.
+	///
+	/// # Note
+	/// - The deposit value is maxed by 2^99
+	fn encode(gas_limit: U256, weight: Weight, deposit: Balance) -> U256 {
+		let deposit: u128 = deposit.into();
+		let deposit_component = log2_round_up(deposit);
+
+		let proof_size = weight.proof_size();
+		let proof_size_component = SCALE * log2_round_up(proof_size);
+
+		let ref_time = weight.ref_time();
+		let ref_time_component = SCALE.pow(2) * log2_round_up(ref_time);
+
+		let components = U256::from(deposit_component + proof_size_component + ref_time_component);
+
+		let raw_gas_mask = U256::from(SCALE).pow(3.into());
+		let raw_gas_component = if gas_limit < raw_gas_mask.saturating_add(components) {
+			raw_gas_mask
+		} else {
+			round_up(gas_limit, raw_gas_mask).saturating_mul(raw_gas_mask)
+		};
+
+		components.saturating_add(raw_gas_component)
+	}
+
+	fn decode(gas: U256) -> Option<(Weight, Balance)> {
+		let deposit = gas % SCALE;
+
+		// Casting with as_u32 is safe since all values are maxed by `SCALE`.
+		let deposit = deposit.as_u32();
+		let proof_time = ((gas / SCALE) % SCALE).as_u32();
+		let ref_time = ((gas / SCALE.pow(2)) % SCALE).as_u32();
+
+		let weight = Weight::from_parts(
+			if ref_time == 0 { 0 } else { 1u64.checked_shl(ref_time)? },
+			if proof_time == 0 { 0 } else { 1u64.checked_shl(proof_time)? },
+		);
+		let deposit =
+			if deposit == 0 { Balance::zero() } else { Balance::one().checked_shl(deposit)? };
+
+		Some((weight, deposit))
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[test]
+	fn test_gas_encoding_decoding_works() {
+		let raw_gas_limit = 111_111_999_999_999u128;
+		let weight = Weight::from_parts(222_999_999, 333_999_999);
+		let deposit = 444_999_999u64;
+
+		let encoded_gas = <() as GasEncoder<u64>>::encode(raw_gas_limit.into(), weight, deposit);
+		assert_eq!(encoded_gas, U256::from(111_112_000_282_929u128));
+		assert!(encoded_gas > raw_gas_limit.into());
+
+		let (decoded_weight, decoded_deposit) =
+			<() as GasEncoder<u64>>::decode(encoded_gas).unwrap();
+		assert!(decoded_weight.all_gte(weight));
+		assert!(weight.mul(2).all_gte(weight));
+
+		assert!(decoded_deposit >= deposit);
+		assert!(deposit * 2 >= decoded_deposit);
+	}
+
+	#[test]
+	fn test_encoding_zero_values_work() {
+		let encoded_gas = <() as GasEncoder<u64>>::encode(
+			Default::default(),
+			Default::default(),
+			Default::default(),
+		);
+
+		assert_eq!(encoded_gas, U256::from(1_00_00_00));
+
+		let (decoded_weight, decoded_deposit) =
+			<() as GasEncoder<u64>>::decode(encoded_gas).unwrap();
+		assert_eq!(Weight::default(), decoded_weight);
+		assert_eq!(0u64, decoded_deposit);
+	}
+
+	#[test]
+	fn test_overflow() {
+		assert_eq!(None, <() as GasEncoder<u64>>::decode(65_00u128.into()), "Invalid proof size");
+		assert_eq!(None, <() as GasEncoder<u64>>::decode(65_00_00u128.into()), "Invalid ref_time");
+	}
+}
diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs
index 24b75de8356..d4b344e20eb 100644
--- a/substrate/frame/revive/src/evm/runtime.rs
+++ b/substrate/frame/revive/src/evm/runtime.rs
@@ -16,9 +16,13 @@
 // limitations under the License.
 //! Runtime types for integrating `pallet-revive` with the EVM.
 use crate::{
-	evm::api::{GenericTransaction, TransactionSigned},
-	AccountIdOf, AddressMapper, BalanceOf, MomentOf, Weight, LOG_TARGET,
+	evm::{
+		api::{GenericTransaction, TransactionSigned},
+		GasEncoder,
+	},
+	AccountIdOf, AddressMapper, BalanceOf, Config, MomentOf, LOG_TARGET,
 };
+use alloc::vec::Vec;
 use codec::{Decode, Encode};
 use frame_support::{
 	dispatch::{DispatchInfo, GetDispatchInfo},
@@ -26,20 +30,17 @@ use frame_support::{
 };
 use pallet_transaction_payment::OnChargeTransaction;
 use scale_info::{StaticTypeInfo, TypeInfo};
-use sp_arithmetic::Percent;
 use sp_core::{Get, H256, U256};
 use sp_runtime::{
 	generic::{self, CheckedExtrinsic, ExtrinsicFormat},
 	traits::{
-		self, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, Member,
-		TransactionExtension,
+		self, AtLeast32BitUnsigned, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata,
+		IdentifyAccount, Member, TransactionExtension,
 	},
 	transaction_validity::{InvalidTransaction, TransactionValidityError},
 	OpaqueExtrinsic, RuntimeDebug, Saturating,
 };
 
-use alloc::vec::Vec;
-
 type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
 
 /// The EVM gas price.
@@ -48,7 +49,28 @@ type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
 /// We use a fixed value for the gas price.
 /// This let us calculate the gas estimate for a transaction with the formula:
 /// `estimate_gas = substrate_fee / gas_price`.
-pub const GAS_PRICE: u32 = 1u32;
+///
+/// The chosen constant value is:
+/// - Not too high, ensuring the gas value is large enough (at least 7 digits) to encode the
+///   ref_time, proof_size, and deposit into the less significant (6 lower) digits of the gas value.
+/// - Not too low, enabling users to adjust the gas price to define a tip.
+pub const GAS_PRICE: u32 = 1_000u32;
+
+/// Convert a `Balance` into a gas value, using the fixed `GAS_PRICE`.
+/// The gas is calculated as `balance / GAS_PRICE`, rounded up to the nearest integer.
+pub fn gas_from_fee<Balance>(fee: Balance) -> U256
+where
+	u32: Into<Balance>,
+	Balance: Into<U256> + AtLeast32BitUnsigned + Copy,
+{
+	let gas_price = GAS_PRICE.into();
+	let remainder = fee % gas_price;
+	if remainder.is_zero() {
+		(fee / gas_price).into()
+	} else {
+		(fee.saturating_add(gas_price) / gas_price).into()
+	}
+}
 
 /// Wraps [`generic::UncheckedExtrinsic`] to support checking unsigned
 /// [`crate::Call::eth_transact`] extrinsic.
@@ -140,15 +162,8 @@ where
 	fn check(self, lookup: &Lookup) -> Result<Self::Checked, TransactionValidityError> {
 		if !self.0.is_signed() {
 			if let Ok(call) = self.0.function.clone().try_into() {
-				if let crate::Call::eth_transact { payload, gas_limit, storage_deposit_limit } =
-					call
-				{
-					let checked = E::try_into_checked_extrinsic(
-						payload,
-						gas_limit,
-						storage_deposit_limit,
-						self.encoded_size(),
-					)?;
+				if let crate::Call::eth_transact { payload } = call {
+					let checked = E::try_into_checked_extrinsic(payload, self.encoded_size())?;
 					return Ok(checked)
 				};
 			}
@@ -251,7 +266,7 @@ where
 /// EthExtra convert an unsigned [`crate::Call::eth_transact`] into a [`CheckedExtrinsic`].
 pub trait EthExtra {
 	/// The Runtime configuration.
-	type Config: crate::Config + pallet_transaction_payment::Config;
+	type Config: Config + pallet_transaction_payment::Config;
 
 	/// The Runtime's transaction extension.
 	/// It should include at least:
@@ -281,8 +296,6 @@ pub trait EthExtra {
 	/// - `encoded_len`: The encoded length of the extrinsic.
 	fn try_into_checked_extrinsic(
 		payload: Vec<u8>,
-		gas_limit: Weight,
-		storage_deposit_limit: BalanceOf<Self::Config>,
 		encoded_len: usize,
 	) -> Result<
 		CheckedExtrinsic<AccountIdOf<Self::Config>, CallOf<Self::Config>, Self::Extension>,
@@ -307,12 +320,16 @@ pub trait EthExtra {
 			InvalidTransaction::BadProof
 		})?;
 
-		let signer =
-			<Self::Config as crate::Config>::AddressMapper::to_fallback_account_id(&signer);
+		let signer = <Self::Config as Config>::AddressMapper::to_fallback_account_id(&signer);
 		let GenericTransaction { nonce, chain_id, to, value, input, gas, gas_price, .. } =
 			GenericTransaction::from_signed(tx, None);
 
-		if chain_id.unwrap_or_default() != <Self::Config as crate::Config>::ChainId::get().into() {
+		let Some(gas) = gas else {
+			log::debug!(target: LOG_TARGET, "No gas provided");
+			return Err(InvalidTransaction::Call);
+		};
+
+		if chain_id.unwrap_or_default() != <Self::Config as Config>::ChainId::get().into() {
 			log::debug!(target: LOG_TARGET, "Invalid chain_id {chain_id:?}");
 			return Err(InvalidTransaction::Call);
 		}
@@ -324,6 +341,13 @@ pub trait EthExtra {
 			})?;
 
 		let data = input.unwrap_or_default().0;
+
+		let (gas_limit, storage_deposit_limit) =
+			<Self::Config as Config>::EthGasEncoder::decode(gas).ok_or_else(|| {
+				log::debug!(target: LOG_TARGET, "Failed to decode gas: {gas:?}");
+				InvalidTransaction::Call
+			})?;
+
 		let call = if let Some(dest) = to {
 			crate::Call::call::<Self::Config> {
 				dest,
@@ -359,13 +383,13 @@ pub trait EthExtra {
 		// Fees calculated with the fixed `GAS_PRICE`
 		// When we dry-run the transaction, we set the gas to `Fee / GAS_PRICE`
 		let eth_fee_no_tip = U256::from(GAS_PRICE)
-			.saturating_mul(gas.unwrap_or_default())
+			.saturating_mul(gas)
 			.try_into()
 			.map_err(|_| InvalidTransaction::Call)?;
 
 		// Fees with the actual gas_price from the transaction.
 		let eth_fee: BalanceOf<Self::Config> = U256::from(gas_price.unwrap_or_default())
-			.saturating_mul(gas.unwrap_or_default())
+			.saturating_mul(gas)
 			.try_into()
 			.map_err(|_| InvalidTransaction::Call)?;
 
@@ -380,27 +404,17 @@ pub trait EthExtra {
 				Default::default(),
 			)
 			.into();
-		log::trace!(target: LOG_TARGET, "try_into_checked_extrinsic: encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}");
+		log::debug!(target: LOG_TARGET, "try_into_checked_extrinsic: gas_price: {gas_price:?}, encoded_len: {encoded_len:?} actual_fee: {actual_fee:?} eth_fee: {eth_fee:?}");
 
 		// The fees from the Ethereum transaction should be greater or equal to the actual fees paid
 		// by the account.
 		if eth_fee < actual_fee {
-			log::debug!(target: LOG_TARGET, "fees {eth_fee:?} too low for the extrinsic {actual_fee:?}");
+			log::debug!(target: LOG_TARGET, "eth fees {eth_fee:?} too low, actual fees: {actual_fee:?}");
 			return Err(InvalidTransaction::Payment.into())
 		}
 
-		let min = actual_fee.min(eth_fee_no_tip);
-		let max = actual_fee.max(eth_fee_no_tip);
-		let diff = Percent::from_rational(max - min, min);
-		if diff > Percent::from_percent(10) {
-			log::trace!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?} should be no more than 10% got {diff:?}");
-			return Err(InvalidTransaction::Call.into())
-		} else {
-			log::trace!(target: LOG_TARGET, "Difference between the extrinsic fees {actual_fee:?} and the Ethereum gas fees {eth_fee_no_tip:?}:  {diff:?}");
-		}
-
 		let tip = eth_fee.saturating_sub(eth_fee_no_tip);
-		log::debug!(target: LOG_TARGET, "Created checked Ethereum transaction with nonce {nonce:?} and tip: {tip:?}");
+		log::debug!(target: LOG_TARGET, "Created checked Ethereum transaction with nonce: {nonce:?} and tip: {tip:?}");
 		Ok(CheckedExtrinsic {
 			format: ExtrinsicFormat::Signed(signer.into(), Self::get_eth_extension(nonce, tip)),
 			function,
@@ -415,6 +429,7 @@ mod test {
 		evm::*,
 		test_utils::*,
 		tests::{ExtBuilder, RuntimeCall, RuntimeOrigin, Test},
+		Weight,
 	};
 	use frame_support::{error::LookupError, traits::fungible::Mutate};
 	use pallet_revive_fixtures::compile_module;
@@ -456,8 +471,6 @@ mod test {
 	#[derive(Clone)]
 	struct UncheckedExtrinsicBuilder {
 		tx: GenericTransaction,
-		gas_limit: Weight,
-		storage_deposit_limit: BalanceOf<Test>,
 		before_validate: Option<std::sync::Arc<dyn Fn() + Send + Sync>>,
 	}
 
@@ -467,12 +480,10 @@ mod test {
 			Self {
 				tx: GenericTransaction {
 					from: Some(Account::default().address()),
-					chain_id: Some(<Test as crate::Config>::ChainId::get().into()),
+					chain_id: Some(<Test as Config>::ChainId::get().into()),
 					gas_price: Some(U256::from(GAS_PRICE)),
 					..Default::default()
 				},
-				gas_limit: Weight::zero(),
-				storage_deposit_limit: 0,
 				before_validate: None,
 			}
 		}
@@ -500,7 +511,6 @@ mod test {
 		fn call_with(dest: H160) -> Self {
 			let mut builder = Self::new();
 			builder.tx.to = Some(dest);
-			ExtBuilder::default().build().execute_with(|| builder.estimate_gas());
 			builder
 		}
 
@@ -508,45 +518,42 @@ mod test {
 		fn instantiate_with(code: Vec<u8>, data: Vec<u8>) -> Self {
 			let mut builder = Self::new();
 			builder.tx.input = Some(Bytes(code.into_iter().chain(data.into_iter()).collect()));
-			ExtBuilder::default().build().execute_with(|| builder.estimate_gas());
 			builder
 		}
 
-		/// Update the transaction with the given function.
-		fn update(mut self, f: impl FnOnce(&mut GenericTransaction) -> ()) -> Self {
-			f(&mut self.tx);
-			self
-		}
 		/// Set before_validate function.
 		fn before_validate(mut self, f: impl Fn() + Send + Sync + 'static) -> Self {
 			self.before_validate = Some(std::sync::Arc::new(f));
 			self
 		}
 
+		fn check(
+			self,
+		) -> Result<(RuntimeCall, SignedExtra, GenericTransaction), TransactionValidityError> {
+			self.mutate_estimate_and_check(Box::new(|_| ()))
+		}
+
 		/// Call `check` on the unchecked extrinsic, and `pre_dispatch` on the signed extension.
-		fn check(&self) -> Result<(RuntimeCall, SignedExtra), TransactionValidityError> {
+		fn mutate_estimate_and_check(
+			mut self,
+			f: Box<dyn FnOnce(&mut GenericTransaction) -> ()>,
+		) -> Result<(RuntimeCall, SignedExtra, GenericTransaction), TransactionValidityError> {
+			ExtBuilder::default().build().execute_with(|| self.estimate_gas());
+			f(&mut self.tx);
 			ExtBuilder::default().build().execute_with(|| {
-				let UncheckedExtrinsicBuilder {
-					tx,
-					gas_limit,
-					storage_deposit_limit,
-					before_validate,
-				} = self.clone();
+				let UncheckedExtrinsicBuilder { tx, before_validate, .. } = self.clone();
 
 				// Fund the account.
 				let account = Account::default();
-				let _ = <Test as crate::Config>::Currency::set_balance(
+				let _ = <Test as Config>::Currency::set_balance(
 					&account.substrate_account(),
 					100_000_000_000_000,
 				);
 
-				let payload =
-					account.sign_transaction(tx.try_into_unsigned().unwrap()).signed_payload();
-				let call = RuntimeCall::Contracts(crate::Call::eth_transact {
-					payload,
-					gas_limit,
-					storage_deposit_limit,
-				});
+				let payload = account
+					.sign_transaction(tx.clone().try_into_unsigned().unwrap())
+					.signed_payload();
+				let call = RuntimeCall::Contracts(crate::Call::eth_transact { payload });
 
 				let encoded_len = call.encoded_size();
 				let uxt: Ex = generic::UncheckedExtrinsic::new_bare(call).into();
@@ -565,7 +572,7 @@ mod test {
 					0,
 				)?;
 
-				Ok((result.function, extra))
+				Ok((result.function, extra, tx))
 			})
 		}
 	}
@@ -573,14 +580,18 @@ mod test {
 	#[test]
 	fn check_eth_transact_call_works() {
 		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]));
+		let (call, _, tx) = builder.check().unwrap();
+		let (gas_limit, storage_deposit_limit) =
+			<<Test as Config>::EthGasEncoder as GasEncoder<_>>::decode(tx.gas.unwrap()).unwrap();
+
 		assert_eq!(
-			builder.check().unwrap().0,
+			call,
 			crate::Call::call::<Test> {
-				dest: builder.tx.to.unwrap(),
-				value: builder.tx.value.unwrap_or_default().as_u64(),
-				gas_limit: builder.gas_limit,
-				storage_deposit_limit: builder.storage_deposit_limit,
-				data: builder.tx.input.unwrap_or_default().0
+				dest: tx.to.unwrap(),
+				value: tx.value.unwrap_or_default().as_u64(),
+				data: tx.input.unwrap_or_default().0,
+				gas_limit,
+				storage_deposit_limit
 			}
 			.into()
 		);
@@ -591,16 +602,19 @@ mod test {
 		let (code, _) = compile_module("dummy").unwrap();
 		let data = vec![];
 		let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone());
+		let (call, _, tx) = builder.check().unwrap();
+		let (gas_limit, storage_deposit_limit) =
+			<<Test as Config>::EthGasEncoder as GasEncoder<_>>::decode(tx.gas.unwrap()).unwrap();
 
 		assert_eq!(
-			builder.check().unwrap().0,
+			call,
 			crate::Call::instantiate_with_code::<Test> {
-				value: builder.tx.value.unwrap_or_default().as_u64(),
-				gas_limit: builder.gas_limit,
-				storage_deposit_limit: builder.storage_deposit_limit,
+				value: tx.value.unwrap_or_default().as_u64(),
 				code,
 				data,
-				salt: None
+				salt: None,
+				gas_limit,
+				storage_deposit_limit
 			}
 			.into()
 		);
@@ -608,11 +622,10 @@ mod test {
 
 	#[test]
 	fn check_eth_transact_nonce_works() {
-		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]))
-			.update(|tx| tx.nonce = Some(1u32.into()));
+		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]));
 
 		assert_eq!(
-			builder.check(),
+			builder.mutate_estimate_and_check(Box::new(|tx| tx.nonce = Some(1u32.into()))),
 			Err(TransactionValidityError::Invalid(InvalidTransaction::Future))
 		);
 
@@ -629,11 +642,10 @@ mod test {
 
 	#[test]
 	fn check_eth_transact_chain_id_works() {
-		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]))
-			.update(|tx| tx.chain_id = Some(42.into()));
+		let builder = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]));
 
 		assert_eq!(
-			builder.check(),
+			builder.mutate_estimate_and_check(Box::new(|tx| tx.chain_id = Some(42.into()))),
 			Err(TransactionValidityError::Invalid(InvalidTransaction::Call))
 		);
 	}
@@ -646,14 +658,14 @@ mod test {
 
 		// Fail because the tx input fail to get the blob length
 		assert_eq!(
-			builder.clone().update(|tx| tx.input = Some(Bytes(vec![1, 2, 3]))).check(),
+			builder.mutate_estimate_and_check(Box::new(|tx| tx.input = Some(Bytes(vec![1, 2, 3])))),
 			Err(TransactionValidityError::Invalid(InvalidTransaction::Call))
 		);
 	}
 
 	#[test]
 	fn check_transaction_fees() {
-		let scenarios: [(_, Box<dyn FnOnce(&mut GenericTransaction)>, _); 5] = [
+		let scenarios: Vec<(_, Box<dyn FnOnce(&mut GenericTransaction)>, _)> = vec![
 			(
 				"Eth fees too low",
 				Box::new(|tx| {
@@ -661,42 +673,20 @@ mod test {
 				}),
 				InvalidTransaction::Payment,
 			),
-			(
-				"Gas fees too high",
-				Box::new(|tx| {
-					tx.gas = Some(tx.gas.unwrap() * 2);
-				}),
-				InvalidTransaction::Call,
-			),
 			(
 				"Gas fees too low",
 				Box::new(|tx| {
-					tx.gas = Some(tx.gas.unwrap() * 2);
-				}),
-				InvalidTransaction::Call,
-			),
-			(
-				"Diff > 10%",
-				Box::new(|tx| {
-					tx.gas = Some(tx.gas.unwrap() * 111 / 100);
+					tx.gas = Some(tx.gas.unwrap() / 2);
 				}),
-				InvalidTransaction::Call,
-			),
-			(
-				"Diff < 10%",
-				Box::new(|tx| {
-					tx.gas_price = Some(tx.gas_price.unwrap() * 2);
-					tx.gas = Some(tx.gas.unwrap() * 89 / 100);
-				}),
-				InvalidTransaction::Call,
+				InvalidTransaction::Payment,
 			),
 		];
 
 		for (msg, update_tx, err) in scenarios {
-			let builder =
-				UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20])).update(update_tx);
+			let res = UncheckedExtrinsicBuilder::call_with(H160::from([1u8; 20]))
+				.mutate_estimate_and_check(update_tx);
 
-			assert_eq!(builder.check(), Err(TransactionValidityError::Invalid(err)), "{}", msg);
+			assert_eq!(res, Err(TransactionValidityError::Invalid(err)), "{}", msg);
 		}
 	}
 
@@ -704,16 +694,16 @@ mod test {
 	fn check_transaction_tip() {
 		let (code, _) = compile_module("dummy").unwrap();
 		let data = vec![];
-		let builder = UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone())
-			.update(|tx| {
-				tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100);
-				log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price);
-			});
+		let (_, extra, tx) =
+			UncheckedExtrinsicBuilder::instantiate_with(code.clone(), data.clone())
+				.mutate_estimate_and_check(Box::new(|tx| {
+					tx.gas_price = Some(tx.gas_price.unwrap() * 103 / 100);
+					log::debug!(target: LOG_TARGET, "Gas price: {:?}", tx.gas_price);
+				}))
+				.unwrap();
 
-		let tx = &builder.tx;
 		let expected_tip =
 			tx.gas_price.unwrap() * tx.gas.unwrap() - U256::from(GAS_PRICE) * tx.gas.unwrap();
-		let (_, extra) = builder.check().unwrap();
 		assert_eq!(U256::from(extra.1.tip()), expected_tip);
 	}
 }
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index b9a39e7ce4d..04bce264a18 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -41,7 +41,10 @@ pub mod test_utils;
 pub mod weights;
 
 use crate::{
-	evm::{runtime::GAS_PRICE, GenericTransaction},
+	evm::{
+		runtime::{gas_from_fee, GAS_PRICE},
+		GasEncoder, GenericTransaction,
+	},
 	exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack},
 	gas::GasMeter,
 	storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager},
@@ -295,6 +298,11 @@ pub mod pallet {
 		/// The ratio between the decimal representation of the native token and the ETH token.
 		#[pallet::constant]
 		type NativeToEthRatio: Get<u32>;
+
+		/// Encode and decode Ethereum gas values.
+		/// Only valid value is `()`. See [`GasEncoder`].
+		#[pallet::no_default_bounds]
+		type EthGasEncoder: GasEncoder<BalanceOf<Self>>;
 	}
 
 	/// Container for different types that implement [`DefaultConfig`]` of this pallet.
@@ -368,6 +376,7 @@ pub mod pallet {
 			type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>;
 			type ChainId = ConstU64<0>;
 			type NativeToEthRatio = ConstU32<1>;
+			type EthGasEncoder = ();
 		}
 	}
 
@@ -560,6 +569,8 @@ pub mod pallet {
 		AccountUnmapped,
 		/// Tried to map an account that is already mapped.
 		AccountAlreadyMapped,
+		/// The transaction used to dry-run a contract is invalid.
+		InvalidGenericTransaction,
 	}
 
 	/// A reason for the pallet contracts placing a hold on funds.
@@ -761,12 +772,7 @@ pub mod pallet {
 		#[allow(unused_variables)]
 		#[pallet::call_index(0)]
 		#[pallet::weight(Weight::MAX)]
-		pub fn eth_transact(
-			origin: OriginFor<T>,
-			payload: Vec<u8>,
-			gas_limit: Weight,
-			#[pallet::compact] storage_deposit_limit: BalanceOf<T>,
-		) -> DispatchResultWithPostInfo {
+		pub fn eth_transact(origin: OriginFor<T>, payload: Vec<u8>) -> DispatchResultWithPostInfo {
 			Err(frame_system::Error::CallFiltered::<T>.into())
 		}
 
@@ -1406,11 +1412,8 @@ where
 				return Err(EthTransactError::Message("Invalid transaction".into()));
 			};
 
-			let eth_dispatch_call = crate::Call::<T>::eth_transact {
-				payload: unsigned_tx.dummy_signed_payload(),
-				gas_limit: result.gas_required,
-				storage_deposit_limit: result.storage_deposit,
-			};
+			let eth_dispatch_call =
+				crate::Call::<T>::eth_transact { payload: unsigned_tx.dummy_signed_payload() };
 			let encoded_len = utx_encoded_size(eth_dispatch_call);
 			let fee = pallet_transaction_payment::Pallet::<T>::compute_fee(
 				encoded_len,
@@ -1418,7 +1421,9 @@ where
 				0u32.into(),
 			)
 			.into();
-			let eth_gas: U256 = (fee / GAS_PRICE.into()).into();
+			let eth_gas = gas_from_fee(fee);
+			let eth_gas =
+				T::EthGasEncoder::encode(eth_gas, result.gas_required, result.storage_deposit);
 
 			if eth_gas == result.eth_gas {
 				log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}");
-- 
GitLab


From f0eec07f93759331e6520ccc67f3d3291f0122c4 Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Mon, 13 Jan 2025 18:38:52 +0200
Subject: [PATCH 043/116] Increase the number of pvf execute workers (#7116)

Reference hardware requirements have been bumped to at least 8 cores so
we can now allocate 50% of that capacity to PVF execution.

---------

Signed-off-by: Alexandru Gheorghe <alexandru.gheorghe@parity.io>
---
 polkadot/node/service/src/lib.rs | 11 +++--------
 prdoc/pr_7116.prdoc              |  8 ++++++++
 2 files changed, 11 insertions(+), 8 deletions(-)
 create mode 100644 prdoc/pr_7116.prdoc

diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index 227bc525399..820cce8d083 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -944,14 +944,9 @@ pub fn new_full<
 				secure_validator_mode,
 				prep_worker_path,
 				exec_worker_path,
-				pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or_else(
-					|| match config.chain_spec.identify_chain() {
-						// The intention is to use this logic for gradual increasing from 2 to 4
-						// of this configuration chain by chain until it reaches production chain.
-						Chain::Polkadot | Chain::Kusama => 2,
-						Chain::Rococo | Chain::Westend | Chain::Unknown => 4,
-					},
-				),
+				// Default execution workers is 4 because we have 8 cores on the reference hardware,
+				// and this accounts for 50% of that cpu capacity.
+				pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or(4),
 				pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1),
 				pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2),
 			})
diff --git a/prdoc/pr_7116.prdoc b/prdoc/pr_7116.prdoc
new file mode 100644
index 00000000000..95a5254778a
--- /dev/null
+++ b/prdoc/pr_7116.prdoc
@@ -0,0 +1,8 @@
+title: Increase the number of pvf execution workers from 2 to 4
+doc:
+- audience: Node Dev
+  description: |-
+    Increase the number of pvf execution workers from 2 to 4.
+crates:
+- name: polkadot-service
+  bump: patch
-- 
GitLab


From 0e0fa4782e2872ea74d8038ebedb9f6e6be53457 Mon Sep 17 00:00:00 2001
From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Date: Mon, 13 Jan 2025 18:42:22 +0100
Subject: [PATCH 044/116] `fatxpool`: rotator cache size now depends on pool's
 limits (#7102)

# Description

This PR modifies the hard-coded size of extrinsics cache within
[`PoolRotator`](https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/rotator.rs#L36-L45)
to be in line with pool limits.

The problem was, that due to small size (comparing to number of txs in
single block) of hard coded size:

https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/rotator.rs#L34
excessive number of unnecessary verification were performed in
`prune_tags`:

https://github.com/paritytech/polkadot-sdk/blob/cdf107de700388a52a17b2fb852c98420c78278e/substrate/client/transaction-pool/src/graph/pool.rs#L369-L370

This was resulting in quite long durations of `prune_tags` execution
time (which was ok for 6s, but becomes noticeable for 2s blocks):
```
Pruning at HashAndNumber { number: 83, ... }. Resubmitting transactions: 6142, reverification took: 237.818955ms
Pruning at HashAndNumber { number: 84, ... }. Resubmitting transactions: 5985, reverification took: 222.118218ms
Pruning at HashAndNumber { number: 85, ... }. Resubmitting transactions: 5981, reverification took: 215.546847ms
```

The fix reduces the overhead:
```
Pruning at HashAndNumber { number: 92, ... }. Resubmitting transactions: 6325, reverification took: 14.728354ms
Pruning at HashAndNumber { number: 93, ... }. Resubmitting transactions: 7030, reverification took: 23.973607ms
Pruning at HashAndNumber { number: 94, ... }. Resubmitting transactions: 4465, reverification took: 9.532472ms
```

## Review Notes
I decided to leave the hardcoded `EXPECTED_SIZE` for the legacy
transaction pool. Removing verification of transactions during
re-submission may negatively impact the behavior of the legacy
(single-state) pool. As in the long term we probably want to deprecate the old
pool, I did not invest time to assess the impact of the rotator change on the
behavior of the legacy pool.

---------

Co-authored-by: command-bot <>
Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
---
 prdoc/pr_7102.prdoc                           |  8 +++
 .../client/transaction-pool/benches/basics.rs | 12 ++++-
 .../transaction-pool/src/common/tests.rs      |  2 +-
 .../src/fork_aware_txpool/dropped_watcher.rs  |  4 +-
 .../fork_aware_txpool/fork_aware_txpool.rs    |  2 +-
 .../client/transaction-pool/src/graph/pool.rs | 49 ++++++++++++++-----
 .../transaction-pool/src/graph/rotator.rs     | 42 ++++++++++++----
 .../src/graph/validated_pool.rs               | 31 ++++++++++--
 .../src/single_state_txpool/revalidation.rs   | 12 ++++-
 .../single_state_txpool.rs                    | 12 ++++-
 .../client/transaction-pool/tests/fatp.rs     |  4 +-
 .../client/transaction-pool/tests/pool.rs     |  4 +-
 12 files changed, 144 insertions(+), 38 deletions(-)
 create mode 100644 prdoc/pr_7102.prdoc

diff --git a/prdoc/pr_7102.prdoc b/prdoc/pr_7102.prdoc
new file mode 100644
index 00000000000..b1923aafc3d
--- /dev/null
+++ b/prdoc/pr_7102.prdoc
@@ -0,0 +1,8 @@
+title: '`fatxpool`: rotator cache size now depends on pool''s limits'
+doc:
+- audience: Node Dev
+  description: |-
+    This PR modifies the hard-coded size of extrinsics cache within `PoolRotator` to be inline with pool limits. It only applies to fork-aware transaction pool. For the legacy (single-state) transaction pool the logic remains untouched.
+crates:
+- name: sc-transaction-pool
+  bump: minor
diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs
index 5e40b0fb72d..5ba9dd40c15 100644
--- a/substrate/client/transaction-pool/benches/basics.rs
+++ b/substrate/client/transaction-pool/benches/basics.rs
@@ -197,14 +197,22 @@ fn benchmark_main(c: &mut Criterion) {
 	c.bench_function("sequential 50 tx", |b| {
 		b.iter(|| {
 			let api = Arc::from(TestApi::new_dependant());
-			bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 50, api);
+			bench_configured(
+				Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()),
+				50,
+				api,
+			);
 		});
 	});
 
 	c.bench_function("random 100 tx", |b| {
 		b.iter(|| {
 			let api = Arc::from(TestApi::default());
-			bench_configured(Pool::new(Default::default(), true.into(), api.clone()), 100, api);
+			bench_configured(
+				Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()),
+				100,
+				api,
+			);
 		});
 	});
 }
diff --git a/substrate/client/transaction-pool/src/common/tests.rs b/substrate/client/transaction-pool/src/common/tests.rs
index b00cf5fbfed..7f2cbe24d8e 100644
--- a/substrate/client/transaction-pool/src/common/tests.rs
+++ b/substrate/client/transaction-pool/src/common/tests.rs
@@ -222,5 +222,5 @@ pub(crate) fn uxt(transfer: Transfer) -> Extrinsic {
 
 pub(crate) fn pool() -> (Pool<TestApi>, Arc<TestApi>) {
 	let api = Arc::new(TestApi::default());
-	(Pool::new(Default::default(), true.into(), api.clone()), api)
+	(Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api)
 }
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
index 7679e3b169d..d69aa37c94a 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
@@ -329,14 +329,14 @@ where
 		let stream_map = futures::stream::unfold(ctx, |mut ctx| async move {
 			loop {
 				if let Some(dropped) = ctx.get_pending_dropped_transaction() {
-					debug!("dropped_watcher: sending out (pending): {dropped:?}");
+					trace!("dropped_watcher: sending out (pending): {dropped:?}");
 					return Some((dropped, ctx));
 				}
 				tokio::select! {
 					biased;
 					Some(event) = next_event(&mut ctx.stream_map) => {
 						if let Some(dropped) = ctx.handle_event(event.0, event.1) {
-							debug!("dropped_watcher: sending out: {dropped:?}");
+							trace!("dropped_watcher: sending out: {dropped:?}");
 							return Some((dropped, ctx));
 						}
 					},
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
index 4ec87f1fefa..e57256943cc 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
@@ -318,7 +318,7 @@ where
 			pool_api.clone(),
 			listener.clone(),
 			metrics.clone(),
-			TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * (options.ready.count + options.future.count),
+			TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * options.total_count(),
 			options.ready.total_bytes + options.future.total_bytes,
 		));
 
diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs
index ff9cc1541af..4c0ace0b1c7 100644
--- a/substrate/client/transaction-pool/src/graph/pool.rs
+++ b/substrate/client/transaction-pool/src/graph/pool.rs
@@ -158,6 +158,13 @@ impl Default for Options {
 	}
 }
 
+impl Options {
+	/// Total (ready+future) maximal number of transactions in the pool.
+	pub fn total_count(&self) -> usize {
+		self.ready.count + self.future.count
+	}
+}
+
 /// Should we check that the transaction is banned
 /// in the pool, before we verify it?
 #[derive(Copy, Clone)]
@@ -172,6 +179,21 @@ pub struct Pool<B: ChainApi> {
 }
 
 impl<B: ChainApi> Pool<B> {
+	/// Create a new transaction pool with statically sized rotator.
+	pub fn new_with_staticly_sized_rotator(
+		options: Options,
+		is_validator: IsValidator,
+		api: Arc<B>,
+	) -> Self {
+		Self {
+			validated_pool: Arc::new(ValidatedPool::new_with_staticly_sized_rotator(
+				options,
+				is_validator,
+				api,
+			)),
+		}
+	}
+
 	/// Create a new transaction pool.
 	pub fn new(options: Options, is_validator: IsValidator, api: Arc<B>) -> Self {
 		Self { validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)) }
@@ -284,6 +306,7 @@ impl<B: ChainApi> Pool<B> {
 		let mut validated_counter: usize = 0;
 
 		let mut future_tags = Vec::new();
+		let now = Instant::now();
 		for (extrinsic, in_pool_tags) in all {
 			match in_pool_tags {
 				// reuse the tags for extrinsics that were found in the pool
@@ -319,7 +342,7 @@ impl<B: ChainApi> Pool<B> {
 			}
 		}
 
-		log::trace!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}");
+		log::debug!(target: LOG_TARGET,"prune: validated_counter:{validated_counter}, took:{:?}", now.elapsed());
 
 		self.prune_tags(at, future_tags, in_pool_hashes).await
 	}
@@ -351,6 +374,7 @@ impl<B: ChainApi> Pool<B> {
 		tags: impl IntoIterator<Item = Tag>,
 		known_imported_hashes: impl IntoIterator<Item = ExtrinsicHash<B>> + Clone,
 	) {
+		let now = Instant::now();
 		log::trace!(target: LOG_TARGET, "Pruning at {:?}", at);
 		// Prune all transactions that provide given tags
 		let prune_status = self.validated_pool.prune_tags(tags);
@@ -369,9 +393,8 @@ impl<B: ChainApi> Pool<B> {
 		let reverified_transactions =
 			self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await;
 
-		let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect();
-
-		log::trace!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}", &at, reverified_transactions.len());
+		let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect::<Vec<_>>();
+		log::debug!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}, reverification took: {:?}", &at, reverified_transactions.len(), now.elapsed());
 		log_xt_trace!(data: tuple, target: LOG_TARGET, &reverified_transactions, "[{:?}] Resubmitting transaction: {:?}");
 
 		// And finally - submit reverified transactions back to the pool
@@ -580,7 +603,7 @@ mod tests {
 	fn should_reject_unactionable_transactions() {
 		// given
 		let api = Arc::new(TestApi::default());
-		let pool = Pool::new(
+		let pool = Pool::new_with_staticly_sized_rotator(
 			Default::default(),
 			// the node does not author blocks
 			false.into(),
@@ -767,7 +790,7 @@ mod tests {
 		let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 		let api = Arc::new(TestApi::default());
-		let pool = Pool::new(options, true.into(), api.clone());
+		let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 		let hash1 =
 			block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap();
@@ -803,7 +826,7 @@ mod tests {
 		let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 		let api = Arc::new(TestApi::default());
-		let pool = Pool::new(options, true.into(), api.clone());
+		let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 		// when
 		block_on(
@@ -1036,7 +1059,7 @@ mod tests {
 				Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 			let api = Arc::new(TestApi::default());
-			let pool = Pool::new(options, true.into(), api.clone());
+			let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 			let xt = uxt(Transfer {
 				from: Alice.into(),
@@ -1074,7 +1097,7 @@ mod tests {
 					Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 				let api = Arc::new(TestApi::default());
-				let pool = Pool::new(options, true.into(), api.clone());
+				let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 				// after validation `IncludeData` will have priority set to 9001
 				// (validate_transaction mock)
@@ -1106,7 +1129,7 @@ mod tests {
 					Options { ready: limit.clone(), future: limit.clone(), ..Default::default() };
 
 				let api = Arc::new(TestApi::default());
-				let pool = Pool::new(options, true.into(), api.clone());
+				let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
 				let han_of_block0 = api.expect_hash_and_number(0);
 
@@ -1151,7 +1174,11 @@ mod tests {
 			let mut api = TestApi::default();
 			api.delay = Arc::new(Mutex::new(rx.into()));
 			let api = Arc::new(api);
-			let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone()));
+			let pool = Arc::new(Pool::new_with_staticly_sized_rotator(
+				Default::default(),
+				true.into(),
+				api.clone(),
+			));
 
 			let han_of_block0 = api.expect_hash_and_number(0);
 
diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs
index 9a2e269b5ee..80d8f24144c 100644
--- a/substrate/client/transaction-pool/src/graph/rotator.rs
+++ b/substrate/client/transaction-pool/src/graph/rotator.rs
@@ -31,7 +31,10 @@ use std::{
 use super::base_pool::Transaction;
 
 /// Expected size of the banned extrinsics cache.
-const EXPECTED_SIZE: usize = 2048;
+const DEFAULT_EXPECTED_SIZE: usize = 2048;
+
+/// The default duration, in seconds, for which an extrinsic is banned.
+const DEFAULT_BAN_TIME_SECS: u64 = 30 * 60;
 
 /// Pool rotator is responsible to only keep fresh extrinsics in the pool.
 ///
@@ -42,18 +45,39 @@ pub struct PoolRotator<Hash> {
 	ban_time: Duration,
 	/// Currently banned extrinsics.
 	banned_until: RwLock<HashMap<Hash, Instant>>,
+	/// Expected size of the banned extrinsics cache.
+	expected_size: usize,
+}
+
+impl<Hash: Clone> Clone for PoolRotator<Hash> {
+	fn clone(&self) -> Self {
+		Self {
+			ban_time: self.ban_time,
+			banned_until: RwLock::new(self.banned_until.read().clone()),
+			expected_size: self.expected_size,
+		}
+	}
 }
 
 impl<Hash: hash::Hash + Eq> Default for PoolRotator<Hash> {
 	fn default() -> Self {
-		Self { ban_time: Duration::from_secs(60 * 30), banned_until: Default::default() }
+		Self {
+			ban_time: Duration::from_secs(DEFAULT_BAN_TIME_SECS),
+			banned_until: Default::default(),
+			expected_size: DEFAULT_EXPECTED_SIZE,
+		}
 	}
 }
 
 impl<Hash: hash::Hash + Eq + Clone> PoolRotator<Hash> {
 	/// New rotator instance with specified ban time.
 	pub fn new(ban_time: Duration) -> Self {
-		Self { ban_time, banned_until: Default::default() }
+		Self { ban_time, ..Self::default() }
+	}
+
+	/// New rotator instance with specified ban time and expected cache size.
+	pub fn new_with_expected_size(ban_time: Duration, expected_size: usize) -> Self {
+		Self { expected_size, ..Self::new(ban_time) }
 	}
 
 	/// Returns `true` if extrinsic hash is currently banned.
@@ -69,8 +93,8 @@ impl<Hash: hash::Hash + Eq + Clone> PoolRotator<Hash> {
 			banned.insert(hash, *now + self.ban_time);
 		}
 
-		if banned.len() > 2 * EXPECTED_SIZE {
-			while banned.len() > EXPECTED_SIZE {
+		if banned.len() > 2 * self.expected_size {
+			while banned.len() > self.expected_size {
 				if let Some(key) = banned.keys().next().cloned() {
 					banned.remove(&key);
 				}
@@ -201,16 +225,16 @@ mod tests {
 		let past_block = 0;
 
 		// when
-		for i in 0..2 * EXPECTED_SIZE {
+		for i in 0..2 * DEFAULT_EXPECTED_SIZE {
 			let tx = tx_with(i as u64, past_block);
 			assert!(rotator.ban_if_stale(&now, past_block, &tx));
 		}
-		assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE);
+		assert_eq!(rotator.banned_until.read().len(), 2 * DEFAULT_EXPECTED_SIZE);
 
 		// then
-		let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block);
+		let tx = tx_with(2 * DEFAULT_EXPECTED_SIZE as u64, past_block);
 		// trigger a garbage collection
 		assert!(rotator.ban_if_stale(&now, past_block, &tx));
-		assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE);
+		assert_eq!(rotator.banned_until.read().len(), DEFAULT_EXPECTED_SIZE);
 	}
 }
diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs
index 14df63d9673..3f7bf4773de 100644
--- a/substrate/client/transaction-pool/src/graph/validated_pool.rs
+++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs
@@ -121,16 +121,41 @@ impl<B: ChainApi> Clone for ValidatedPool<B> {
 			listener: Default::default(),
 			pool: RwLock::from(self.pool.read().clone()),
 			import_notification_sinks: Default::default(),
-			rotator: PoolRotator::default(),
+			rotator: self.rotator.clone(),
 		}
 	}
 }
 
 impl<B: ChainApi> ValidatedPool<B> {
+	/// Create a new transaction pool with statically sized rotator.
+	pub fn new_with_staticly_sized_rotator(
+		options: Options,
+		is_validator: IsValidator,
+		api: Arc<B>,
+	) -> Self {
+		let ban_time = options.ban_time;
+		Self::new_with_rotator(options, is_validator, api, PoolRotator::new(ban_time))
+	}
+
 	/// Create a new transaction pool.
 	pub fn new(options: Options, is_validator: IsValidator, api: Arc<B>) -> Self {
-		let base_pool = base::BasePool::new(options.reject_future_transactions);
 		let ban_time = options.ban_time;
+		let total_count = options.total_count();
+		Self::new_with_rotator(
+			options,
+			is_validator,
+			api,
+			PoolRotator::new_with_expected_size(ban_time, total_count),
+		)
+	}
+
+	fn new_with_rotator(
+		options: Options,
+		is_validator: IsValidator,
+		api: Arc<B>,
+		rotator: PoolRotator<ExtrinsicHash<B>>,
+	) -> Self {
+		let base_pool = base::BasePool::new(options.reject_future_transactions);
 		Self {
 			is_validator,
 			options,
@@ -138,7 +163,7 @@ impl<B: ChainApi> ValidatedPool<B> {
 			api,
 			pool: RwLock::new(base_pool),
 			import_notification_sinks: Default::default(),
-			rotator: PoolRotator::new(ban_time),
+			rotator,
 		}
 	}
 
diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
index f22fa2ddabd..caa09585b28 100644
--- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
+++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
@@ -384,7 +384,11 @@ mod tests {
 	#[test]
 	fn revalidation_queue_works() {
 		let api = Arc::new(TestApi::default());
-		let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone()));
+		let pool = Arc::new(Pool::new_with_staticly_sized_rotator(
+			Default::default(),
+			true.into(),
+			api.clone(),
+		));
 		let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone()));
 
 		let uxt = uxt(Transfer {
@@ -414,7 +418,11 @@ mod tests {
 	#[test]
 	fn revalidation_queue_skips_revalidation_for_unknown_block_hash() {
 		let api = Arc::new(TestApi::default());
-		let pool = Arc::new(Pool::new(Default::default(), true.into(), api.clone()));
+		let pool = Arc::new(Pool::new_with_staticly_sized_rotator(
+			Default::default(),
+			true.into(),
+			api.clone(),
+		));
 		let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone()));
 
 		let uxt0 = uxt(Transfer {
diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
index e7504012ca6..2b32704945c 100644
--- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
+++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
@@ -141,7 +141,11 @@ where
 		finalized_hash: Block::Hash,
 		options: graph::Options,
 	) -> (Self, Pin<Box<dyn Future<Output = ()> + Send>>) {
-		let pool = Arc::new(graph::Pool::new(options, true.into(), pool_api.clone()));
+		let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator(
+			options,
+			true.into(),
+			pool_api.clone(),
+		));
 		let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background(
 			pool_api.clone(),
 			pool.clone(),
@@ -177,7 +181,11 @@ where
 		best_block_hash: Block::Hash,
 		finalized_hash: Block::Hash,
 	) -> Self {
-		let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone()));
+		let pool = Arc::new(graph::Pool::new_with_staticly_sized_rotator(
+			options,
+			is_validator,
+			pool_api.clone(),
+		));
 		let (revalidation_queue, background_task) = match revalidation_type {
 			RevalidationType::Light =>
 				(revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None),
diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs
index 8bf08122995..dd82c52a604 100644
--- a/substrate/client/transaction-pool/tests/fatp.rs
+++ b/substrate/client/transaction-pool/tests/fatp.rs
@@ -2199,7 +2199,7 @@ fn import_sink_works3() {
 		pool.submit_one(genesis, SOURCE, xt1.clone()),
 	];
 
-	let x = block_on(futures::future::join_all(submissions));
+	block_on(futures::future::join_all(submissions));
 
 	let header01a = api.push_block(1, vec![], true);
 	let header01b = api.push_block(1, vec![], true);
@@ -2213,8 +2213,6 @@ fn import_sink_works3() {
 	assert_pool_status!(header01a.hash(), &pool, 1, 1);
 	assert_pool_status!(header01b.hash(), &pool, 1, 1);
 
-	log::debug!("xxx {x:#?}");
-
 	let import_events =
 		futures::executor::block_on_stream(import_stream).take(1).collect::<Vec<_>>();
 
diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs
index 20997606c60..de35726435f 100644
--- a/substrate/client/transaction-pool/tests/pool.rs
+++ b/substrate/client/transaction-pool/tests/pool.rs
@@ -49,7 +49,7 @@ const LOG_TARGET: &str = "txpool";
 
 fn pool() -> (Pool<TestApi>, Arc<TestApi>) {
 	let api = Arc::new(TestApi::with_alice_nonce(209));
-	(Pool::new(Default::default(), true.into(), api.clone()), api)
+	(Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone()), api)
 }
 
 fn maintained_pool() -> (BasicPool<TestApi, Block>, Arc<TestApi>, futures::executor::ThreadPool) {
@@ -224,7 +224,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() {
 	api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| {
 		v.provides.push(vec![155]);
 	}));
-	let pool = Pool::new(Default::default(), true.into(), api.clone());
+	let pool = Pool::new_with_staticly_sized_rotator(Default::default(), true.into(), api.clone());
 	let xt0 = Arc::from(uxt(Alice, 209));
 	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone()))
 		.expect("1. Imported");
-- 
GitLab


From cccefdd965c39498825f34e105979c447b315359 Mon Sep 17 00:00:00 2001
From: "polka.dom" <polkadotdom@gmail.com>
Date: Mon, 13 Jan 2025 16:22:32 -0500
Subject: [PATCH 045/116] Remove usage of the pallet::getter macro from
 pallet-grandpa (#4529)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

As per #3326, removes pallet::getter macro usage from pallet-grandpa.
The syntax `StorageItem::<T, I>::get()` should be used instead.

cc @muraca

---------

Co-authored-by: Bastian Köcher <git@kchr.de>
---
 polkadot/runtime/rococo/src/lib.rs          |   2 +-
 polkadot/runtime/test-runtime/src/lib.rs    |   2 +-
 polkadot/runtime/westend/src/lib.rs         |   2 +-
 prdoc/pr_4529.prdoc                         |  22 ++++
 substrate/bin/node/runtime/src/lib.rs       |   2 +-
 substrate/frame/grandpa/src/benchmarking.rs |   4 +-
 substrate/frame/grandpa/src/equivocation.rs |   2 +-
 substrate/frame/grandpa/src/lib.rs          | 106 +++++++++++++-------
 substrate/frame/grandpa/src/tests.rs        |  89 ++++++++--------
 9 files changed, 144 insertions(+), 87 deletions(-)
 create mode 100644 prdoc/pr_4529.prdoc

diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index cab4394eb5a..e5d703700fe 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -2276,7 +2276,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn current_set_id() -> fg_primitives::SetId {
-			Grandpa::current_set_id()
+			pallet_grandpa::CurrentSetId::<Runtime>::get()
 		}
 
 		fn submit_report_equivocation_unsigned_extrinsic(
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index 82564d5c278..4f9ba8d8508 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -1186,7 +1186,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn current_set_id() -> fg_primitives::SetId {
-			Grandpa::current_set_id()
+			pallet_grandpa::CurrentSetId::<Runtime>::get()
 		}
 
 		fn submit_report_equivocation_unsigned_extrinsic(
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 166f3fc42ee..9d77a5e5eea 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -2300,7 +2300,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn current_set_id() -> fg_primitives::SetId {
-			Grandpa::current_set_id()
+			pallet_grandpa::CurrentSetId::<Runtime>::get()
 		}
 
 		fn submit_report_equivocation_unsigned_extrinsic(
diff --git a/prdoc/pr_4529.prdoc b/prdoc/pr_4529.prdoc
new file mode 100644
index 00000000000..32beea17ad6
--- /dev/null
+++ b/prdoc/pr_4529.prdoc
@@ -0,0 +1,22 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Removed `pallet::getter` usage from pallet-grandpa
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR removed the `pallet::getter`s from `pallet-grandpa`.
+      The syntax `StorageItem::<T, I>::get()` should be used instead
+
+crates:
+  - name: pallet-grandpa
+    bump: minor
+  - name: kitchensink-runtime
+    bump: none
+  - name: westend-runtime
+    bump: none
+  - name: polkadot-test-runtime
+    bump: none
+  - name: rococo-runtime
+    bump: none
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 7de04b27ff8..e11a009c1c3 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -2979,7 +2979,7 @@ impl_runtime_apis! {
 		}
 
 		fn current_set_id() -> sp_consensus_grandpa::SetId {
-			Grandpa::current_set_id()
+			pallet_grandpa::CurrentSetId::<Runtime>::get()
 		}
 
 		fn submit_report_equivocation_unsigned_extrinsic(
diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs
index 0a10e588277..56048efa22c 100644
--- a/substrate/frame/grandpa/src/benchmarking.rs
+++ b/substrate/frame/grandpa/src/benchmarking.rs
@@ -17,7 +17,7 @@
 
 //! Benchmarks for the GRANDPA pallet.
 
-use super::{Pallet as Grandpa, *};
+use super::*;
 use frame_benchmarking::v2::*;
 use frame_system::RawOrigin;
 use sp_core::H256;
@@ -69,7 +69,7 @@ mod benchmarks {
 		#[extrinsic_call]
 		_(RawOrigin::Root, delay, best_finalized_block_number);
 
-		assert!(Grandpa::<T>::stalled().is_some());
+		assert!(Stalled::<T>::get().is_some());
 	}
 
 	impl_benchmark_test_suite!(
diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs
index 2366c957e9a..4ebdbc1eecd 100644
--- a/substrate/frame/grandpa/src/equivocation.rs
+++ b/substrate/frame/grandpa/src/equivocation.rs
@@ -177,7 +177,7 @@ where
 		evidence: (EquivocationProof<T::Hash, BlockNumberFor<T>>, T::KeyOwnerProof),
 	) -> Result<(), DispatchError> {
 		let (equivocation_proof, key_owner_proof) = evidence;
-		let reporter = reporter.or_else(|| <pallet_authorship::Pallet<T>>::author());
+		let reporter = reporter.or_else(|| pallet_authorship::Pallet::<T>::author());
 		let offender = equivocation_proof.offender().clone();
 
 		// We check the equivocation within the context of its set id (and
diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs
index 4f69aeaef52..9017eec2ca8 100644
--- a/substrate/frame/grandpa/src/lib.rs
+++ b/substrate/frame/grandpa/src/lib.rs
@@ -127,7 +127,7 @@ pub mod pallet {
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		fn on_finalize(block_number: BlockNumberFor<T>) {
 			// check for scheduled pending authority set changes
-			if let Some(pending_change) = <PendingChange<T>>::get() {
+			if let Some(pending_change) = PendingChange::<T>::get() {
 				// emit signal if we're at the block that scheduled the change
 				if block_number == pending_change.scheduled_at {
 					let next_authorities = pending_change.next_authorities.to_vec();
@@ -150,12 +150,12 @@ pub mod pallet {
 					Self::deposit_event(Event::NewAuthorities {
 						authority_set: pending_change.next_authorities.into_inner(),
 					});
-					<PendingChange<T>>::kill();
+					PendingChange::<T>::kill();
 				}
 			}
 
 			// check for scheduled pending state changes
-			match <State<T>>::get() {
+			match State::<T>::get() {
 				StoredState::PendingPause { scheduled_at, delay } => {
 					// signal change to pause
 					if block_number == scheduled_at {
@@ -164,7 +164,7 @@ pub mod pallet {
 
 					// enact change to paused state
 					if block_number == scheduled_at + delay {
-						<State<T>>::put(StoredState::Paused);
+						State::<T>::put(StoredState::Paused);
 						Self::deposit_event(Event::Paused);
 					}
 				},
@@ -176,7 +176,7 @@ pub mod pallet {
 
 					// enact change to live state
 					if block_number == scheduled_at + delay {
-						<State<T>>::put(StoredState::Live);
+						State::<T>::put(StoredState::Live);
 						Self::deposit_event(Event::Resumed);
 					}
 				},
@@ -297,37 +297,32 @@ pub mod pallet {
 	}
 
 	#[pallet::type_value]
-	pub(super) fn DefaultForState<T: Config>() -> StoredState<BlockNumberFor<T>> {
+	pub fn DefaultForState<T: Config>() -> StoredState<BlockNumberFor<T>> {
 		StoredState::Live
 	}
 
 	/// State of the current authority set.
 	#[pallet::storage]
-	#[pallet::getter(fn state)]
-	pub(super) type State<T: Config> =
+	pub type State<T: Config> =
 		StorageValue<_, StoredState<BlockNumberFor<T>>, ValueQuery, DefaultForState<T>>;
 
 	/// Pending change: (signaled at, scheduled change).
 	#[pallet::storage]
-	#[pallet::getter(fn pending_change)]
-	pub(super) type PendingChange<T: Config> =
+	pub type PendingChange<T: Config> =
 		StorageValue<_, StoredPendingChange<BlockNumberFor<T>, T::MaxAuthorities>>;
 
 	/// next block number where we can force a change.
 	#[pallet::storage]
-	#[pallet::getter(fn next_forced)]
-	pub(super) type NextForced<T: Config> = StorageValue<_, BlockNumberFor<T>>;
+	pub type NextForced<T: Config> = StorageValue<_, BlockNumberFor<T>>;
 
 	/// `true` if we are currently stalled.
 	#[pallet::storage]
-	#[pallet::getter(fn stalled)]
-	pub(super) type Stalled<T: Config> = StorageValue<_, (BlockNumberFor<T>, BlockNumberFor<T>)>;
+	pub type Stalled<T: Config> = StorageValue<_, (BlockNumberFor<T>, BlockNumberFor<T>)>;
 
 	/// The number of changes (both in terms of keys and underlying economic responsibilities)
 	/// in the "set" of Grandpa validators from genesis.
 	#[pallet::storage]
-	#[pallet::getter(fn current_set_id)]
-	pub(super) type CurrentSetId<T: Config> = StorageValue<_, SetId, ValueQuery>;
+	pub type CurrentSetId<T: Config> = StorageValue<_, SetId, ValueQuery>;
 
 	/// A mapping from grandpa set ID to the index of the *most recent* session for which its
 	/// members were responsible.
@@ -340,12 +335,11 @@ pub mod pallet {
 	///
 	/// TWOX-NOTE: `SetId` is not under user control.
 	#[pallet::storage]
-	#[pallet::getter(fn session_for_set)]
-	pub(super) type SetIdSession<T: Config> = StorageMap<_, Twox64Concat, SetId, SessionIndex>;
+	pub type SetIdSession<T: Config> = StorageMap<_, Twox64Concat, SetId, SessionIndex>;
 
 	/// The current list of authorities.
 	#[pallet::storage]
-	pub(crate) type Authorities<T: Config> =
+	pub type Authorities<T: Config> =
 		StorageValue<_, BoundedAuthorityList<T::MaxAuthorities>, ValueQuery>;
 
 	#[derive(frame_support::DefaultNoBound)]
@@ -432,6 +426,44 @@ pub enum StoredState<N> {
 }
 
 impl<T: Config> Pallet<T> {
+	/// State of the current authority set.
+	pub fn state() -> StoredState<BlockNumberFor<T>> {
+		State::<T>::get()
+	}
+
+	/// Pending change: (signaled at, scheduled change).
+	pub fn pending_change() -> Option<StoredPendingChange<BlockNumberFor<T>, T::MaxAuthorities>> {
+		PendingChange::<T>::get()
+	}
+
+	/// Next block number where we can force a change.
+	pub fn next_forced() -> Option<BlockNumberFor<T>> {
+		NextForced::<T>::get()
+	}
+
+	/// `true` if we are currently stalled.
+	pub fn stalled() -> Option<(BlockNumberFor<T>, BlockNumberFor<T>)> {
+		Stalled::<T>::get()
+	}
+
+	/// The number of changes (both in terms of keys and underlying economic responsibilities)
+	/// in the "set" of Grandpa validators from genesis.
+	pub fn current_set_id() -> SetId {
+		CurrentSetId::<T>::get()
+	}
+
+	/// A mapping from grandpa set ID to the index of the *most recent* session for which its
+	/// members were responsible.
+	///
+	/// This is only used for validating equivocation proofs. An equivocation proof must
+	/// contain a key-ownership proof for a given session, therefore we need a way to tie
+	/// together sessions and GRANDPA set ids, i.e. we need to validate that a validator
+	/// was the owner of a given key on a given session, and what the active set ID was
+	/// during that session.
+	pub fn session_for_set(set_id: SetId) -> Option<SessionIndex> {
+		SetIdSession::<T>::get(set_id)
+	}
+
 	/// Get the current set of authorities, along with their respective weights.
 	pub fn grandpa_authorities() -> AuthorityList {
 		Authorities::<T>::get().into_inner()
@@ -440,9 +472,9 @@ impl<T: Config> Pallet<T> {
 	/// Schedule GRANDPA to pause starting in the given number of blocks.
 	/// Cannot be done when already paused.
 	pub fn schedule_pause(in_blocks: BlockNumberFor<T>) -> DispatchResult {
-		if let StoredState::Live = <State<T>>::get() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
-			<State<T>>::put(StoredState::PendingPause { delay: in_blocks, scheduled_at });
+		if let StoredState::Live = State::<T>::get() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();
+			State::<T>::put(StoredState::PendingPause { delay: in_blocks, scheduled_at });
 
 			Ok(())
 		} else {
@@ -452,9 +484,9 @@ impl<T: Config> Pallet<T> {
 
 	/// Schedule a resume of GRANDPA after pausing.
 	pub fn schedule_resume(in_blocks: BlockNumberFor<T>) -> DispatchResult {
-		if let StoredState::Paused = <State<T>>::get() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
-			<State<T>>::put(StoredState::PendingResume { delay: in_blocks, scheduled_at });
+		if let StoredState::Paused = State::<T>::get() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();
+			State::<T>::put(StoredState::PendingResume { delay: in_blocks, scheduled_at });
 
 			Ok(())
 		} else {
@@ -481,17 +513,17 @@ impl<T: Config> Pallet<T> {
 		in_blocks: BlockNumberFor<T>,
 		forced: Option<BlockNumberFor<T>>,
 	) -> DispatchResult {
-		if !<PendingChange<T>>::exists() {
-			let scheduled_at = <frame_system::Pallet<T>>::block_number();
+		if !PendingChange::<T>::exists() {
+			let scheduled_at = frame_system::Pallet::<T>::block_number();
 
 			if forced.is_some() {
-				if Self::next_forced().map_or(false, |next| next > scheduled_at) {
+				if NextForced::<T>::get().map_or(false, |next| next > scheduled_at) {
 					return Err(Error::<T>::TooSoon.into())
 				}
 
 				// only allow the next forced change when twice the window has passed since
 				// this one.
-				<NextForced<T>>::put(scheduled_at + in_blocks * 2u32.into());
+				NextForced::<T>::put(scheduled_at + in_blocks * 2u32.into());
 			}
 
 			let next_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from(
@@ -502,7 +534,7 @@ impl<T: Config> Pallet<T> {
 				),
 			);
 
-			<PendingChange<T>>::put(StoredPendingChange {
+			PendingChange::<T>::put(StoredPendingChange {
 				delay: in_blocks,
 				scheduled_at,
 				next_authorities,
@@ -518,7 +550,7 @@ impl<T: Config> Pallet<T> {
 	/// Deposit one of this module's logs.
 	fn deposit_log(log: ConsensusLog<BlockNumberFor<T>>) {
 		let log = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode());
-		<frame_system::Pallet<T>>::deposit_log(log);
+		frame_system::Pallet::<T>::deposit_log(log);
 	}
 
 	// Perform module initialization, abstracted so that it can be called either through genesis
@@ -554,7 +586,7 @@ impl<T: Config> Pallet<T> {
 		// when we record old authority sets we could try to figure out _who_
 		// failed. until then, we can't meaningfully guard against
 		// `next == last` the way that normal session changes do.
-		<Stalled<T>>::put((further_wait, median));
+		Stalled::<T>::put((further_wait, median));
 	}
 }
 
@@ -583,10 +615,10 @@ where
 		// Always issue a change if `session` says that the validators have changed.
 		// Even if their session keys are the same as before, the underlying economic
 		// identities have changed.
-		let current_set_id = if changed || <Stalled<T>>::exists() {
+		let current_set_id = if changed || Stalled::<T>::exists() {
 			let next_authorities = validators.map(|(_, k)| (k, 1)).collect::<Vec<_>>();
 
-			let res = if let Some((further_wait, median)) = <Stalled<T>>::take() {
+			let res = if let Some((further_wait, median)) = Stalled::<T>::take() {
 				Self::schedule_change(next_authorities, further_wait, Some(median))
 			} else {
 				Self::schedule_change(next_authorities, Zero::zero(), None)
@@ -608,17 +640,17 @@ where
 				// either the session module signalled that the validators have changed
 				// or the set was stalled. but since we didn't successfully schedule
 				// an authority set change we do not increment the set id.
-				Self::current_set_id()
+				CurrentSetId::<T>::get()
 			}
 		} else {
 			// nothing's changed, neither economic conditions nor session keys. update the pointer
 			// of the current set.
-			Self::current_set_id()
+			CurrentSetId::<T>::get()
 		};
 
 		// update the mapping to note that the current set corresponds to the
 		// latest equivalent session (i.e. now).
-		let session_index = <pallet_session::Pallet<T>>::current_index();
+		let session_index = pallet_session::Pallet::<T>::current_index();
 		SetIdSession::<T>::insert(current_set_id, &session_index);
 	}
 
diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs
index 383f77f00de..f4720966b17 100644
--- a/substrate/frame/grandpa/src/tests.rs
+++ b/substrate/frame/grandpa/src/tests.rs
@@ -110,7 +110,7 @@ fn cannot_schedule_change_when_one_pending() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
 		initialize_block(1, Default::default());
 		Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap();
-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 			Error::<Test>::ChangePending
@@ -120,7 +120,7 @@ fn cannot_schedule_change_when_one_pending() {
 		let header = System::finalize();
 
 		initialize_block(2, header.hash());
-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 			Error::<Test>::ChangePending
@@ -130,7 +130,7 @@ fn cannot_schedule_change_when_one_pending() {
 		let header = System::finalize();
 
 		initialize_block(3, header.hash());
-		assert!(!<PendingChange<Test>>::exists());
+		assert!(!PendingChange::<Test>::exists());
 		assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None));
 
 		Grandpa::on_finalize(3);
@@ -144,7 +144,7 @@ fn dispatch_forced_change() {
 		initialize_block(1, Default::default());
 		Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 5, Some(0)).unwrap();
 
-		assert!(<PendingChange<Test>>::exists());
+		assert!(PendingChange::<Test>::exists());
 		assert_noop!(
 			Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)),
 			Error::<Test>::ChangePending
@@ -155,8 +155,8 @@ fn dispatch_forced_change() {
 
 		for i in 2..7 {
 			initialize_block(i, header.hash());
-			assert!(<PendingChange<Test>>::get().unwrap().forced.is_some());
-			assert_eq!(Grandpa::next_forced(), Some(11));
+			assert!(PendingChange::<Test>::get().unwrap().forced.is_some());
+			assert_eq!(NextForced::<Test>::get(), Some(11));
 			assert_noop!(
 				Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None),
 				Error::<Test>::ChangePending
@@ -174,7 +174,7 @@ fn dispatch_forced_change() {
 		// add a normal change.
 		{
 			initialize_block(7, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_eq!(
 				Grandpa::grandpa_authorities(),
 				to_authorities(vec![(4, 1), (5, 1), (6, 1)])
@@ -187,7 +187,7 @@ fn dispatch_forced_change() {
 		// run the normal change.
 		{
 			initialize_block(8, header.hash());
-			assert!(<PendingChange<Test>>::exists());
+			assert!(PendingChange::<Test>::exists());
 			assert_eq!(
 				Grandpa::grandpa_authorities(),
 				to_authorities(vec![(4, 1), (5, 1), (6, 1)])
@@ -204,9 +204,9 @@ fn dispatch_forced_change() {
 		// time.
 		for i in 9..11 {
 			initialize_block(i, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)]));
-			assert_eq!(Grandpa::next_forced(), Some(11));
+			assert_eq!(NextForced::<Test>::get(), Some(11));
 			assert_noop!(
 				Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)),
 				Error::<Test>::TooSoon
@@ -217,13 +217,13 @@ fn dispatch_forced_change() {
 
 		{
 			initialize_block(11, header.hash());
-			assert!(!<PendingChange<Test>>::exists());
+			assert!(!PendingChange::<Test>::exists());
 			assert_ok!(Grandpa::schedule_change(
 				to_authorities(vec![(5, 1), (6, 1), (7, 1)]),
 				5,
 				Some(0)
 			));
-			assert_eq!(Grandpa::next_forced(), Some(21));
+			assert_eq!(NextForced::<Test>::get(), Some(21));
 			Grandpa::on_finalize(11);
 			header = System::finalize();
 		}
@@ -239,7 +239,10 @@ fn schedule_pause_only_when_live() {
 		Grandpa::schedule_pause(1).unwrap();
 
 		// we've switched to the pending pause state
-		assert_eq!(Grandpa::state(), StoredState::PendingPause { scheduled_at: 1u64, delay: 1 });
+		assert_eq!(
+			State::<Test>::get(),
+			StoredState::PendingPause { scheduled_at: 1u64, delay: 1 }
+		);
 
 		Grandpa::on_finalize(1);
 		let _ = System::finalize();
@@ -253,7 +256,7 @@ fn schedule_pause_only_when_live() {
 		let _ = System::finalize();
 
 		// after finalizing block 2 the set should have switched to paused state
-		assert_eq!(Grandpa::state(), StoredState::Paused);
+		assert_eq!(State::<Test>::get(), StoredState::Paused);
 	});
 }
 
@@ -265,14 +268,14 @@ fn schedule_resume_only_when_paused() {
 		// the set is currently live, resuming it is an error
 		assert_noop!(Grandpa::schedule_resume(1), Error::<Test>::ResumeFailed);
 
-		assert_eq!(Grandpa::state(), StoredState::Live);
+		assert_eq!(State::<Test>::get(), StoredState::Live);
 
 		// we schedule a pause to be applied instantly
 		Grandpa::schedule_pause(0).unwrap();
 		Grandpa::on_finalize(1);
 		let _ = System::finalize();
 
-		assert_eq!(Grandpa::state(), StoredState::Paused);
+		assert_eq!(State::<Test>::get(), StoredState::Paused);
 
 		// we schedule the set to go back live in 2 blocks
 		initialize_block(2, Default::default());
@@ -289,7 +292,7 @@ fn schedule_resume_only_when_paused() {
 		let _ = System::finalize();
 
 		// it should be live at block 4
-		assert_eq!(Grandpa::state(), StoredState::Live);
+		assert_eq!(State::<Test>::get(), StoredState::Live);
 	});
 }
 
@@ -342,7 +345,7 @@ fn report_equivocation_current_set_works() {
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof, with two votes in the same round for
 		// different block hashes signed by the same key
@@ -424,7 +427,7 @@ fn report_equivocation_old_set_works() {
 
 		let equivocation_keyring = extract_keyring(equivocation_key);
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof for the old set,
 		let equivocation_proof = generate_equivocation_proof(
@@ -487,7 +490,7 @@ fn report_equivocation_invalid_set_id() {
 		let key_owner_proof =
 			Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap();
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation for a future set
 		let equivocation_proof = generate_equivocation_proof(
@@ -527,7 +530,7 @@ fn report_equivocation_invalid_session() {
 
 		start_era(2);
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof at set id = 2
 		let equivocation_proof = generate_equivocation_proof(
@@ -568,7 +571,7 @@ fn report_equivocation_invalid_key_owner_proof() {
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof for the authority at index 0
 		let equivocation_proof = generate_equivocation_proof(
@@ -611,7 +614,7 @@ fn report_equivocation_invalid_equivocation_proof() {
 		let key_owner_proof =
 			Historical::prove((sp_consensus_grandpa::KEY_TYPE, &equivocation_key)).unwrap();
 
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		let assert_invalid_equivocation_proof = |equivocation_proof| {
 			assert_err!(
@@ -675,7 +678,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
 		let equivocation_authority_index = 0;
 		let equivocation_key = &authorities[equivocation_authority_index].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		let equivocation_proof = generate_equivocation_proof(
 			set_id,
@@ -748,12 +751,12 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
 #[test]
 fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
-		assert_eq!(Grandpa::current_set_id(), 0);
+		assert_eq!(CurrentSetId::<Test>::get(), 0);
 
 		// starting a new era should lead to a change in the session
 		// validators and trigger a new set
 		start_era(1);
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert_eq!(CurrentSetId::<Test>::get(), 1);
 
 		// we schedule a change delayed by 2 blocks, this should make it so that
 		// when we try to rotate the session at the beginning of the era we will
@@ -761,22 +764,22 @@ fn on_new_session_doesnt_start_new_set_if_schedule_change_failed() {
 		// not increment the set id.
 		Grandpa::schedule_change(to_authorities(vec![(1, 1)]), 2, None).unwrap();
 		start_era(2);
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert_eq!(CurrentSetId::<Test>::get(), 1);
 
 		// everything should go back to normal after.
 		start_era(3);
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert_eq!(CurrentSetId::<Test>::get(), 2);
 
 		// session rotation might also fail to schedule a change if it's for a
 		// forced change (i.e. grandpa is stalled) and it is too soon.
-		<NextForced<Test>>::put(1000);
-		<Stalled<Test>>::put((30, 1));
+		NextForced::<Test>::put(1000);
+		Stalled::<Test>::put((30, 1));
 
 		// NOTE: we cannot go through normal era rotation since having `Stalled`
 		// defined will also trigger a new set (regardless of whether the
 		// session validators changed)
 		Grandpa::on_new_session(true, std::iter::empty(), std::iter::empty());
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert_eq!(CurrentSetId::<Test>::get(), 2);
 	});
 }
 
@@ -790,19 +793,19 @@ fn cleans_up_old_set_id_session_mappings() {
 		// we should have a session id mapping for all the set ids from
 		// `max_set_id_session_entries` eras we have observed
 		for i in 1..=max_set_id_session_entries {
-			assert!(Grandpa::session_for_set(i as u64).is_some());
+			assert!(SetIdSession::<Test>::get(i as u64).is_some());
 		}
 
 		start_era(max_set_id_session_entries * 2);
 
 		// we should keep tracking the new mappings for new eras
 		for i in max_set_id_session_entries + 1..=max_set_id_session_entries * 2 {
-			assert!(Grandpa::session_for_set(i as u64).is_some());
+			assert!(SetIdSession::<Test>::get(i as u64).is_some());
 		}
 
 		// but the old ones should have been pruned by now
 		for i in 1..=max_set_id_session_entries {
-			assert!(Grandpa::session_for_set(i as u64).is_none());
+			assert!(SetIdSession::<Test>::get(i as u64).is_none());
 		}
 	});
 }
@@ -812,24 +815,24 @@ fn always_schedules_a_change_on_new_session_when_stalled() {
 	new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| {
 		start_era(1);
 
-		assert!(Grandpa::pending_change().is_none());
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert!(PendingChange::<Test>::get().is_none());
+		assert_eq!(CurrentSetId::<Test>::get(), 1);
 
 		// if the session handler reports no change then we should not schedule
 		// any pending change
 		Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty());
 
-		assert!(Grandpa::pending_change().is_none());
-		assert_eq!(Grandpa::current_set_id(), 1);
+		assert!(PendingChange::<Test>::get().is_none());
+		assert_eq!(CurrentSetId::<Test>::get(), 1);
 
 		// if grandpa is stalled then we should **always** schedule a forced
 		// change on a new session
-		<Stalled<Test>>::put((10, 1));
+		Stalled::<Test>::put((10, 1));
 		Grandpa::on_new_session(false, std::iter::empty(), std::iter::empty());
 
-		assert!(Grandpa::pending_change().is_some());
-		assert!(Grandpa::pending_change().unwrap().forced.is_some());
-		assert_eq!(Grandpa::current_set_id(), 2);
+		assert!(PendingChange::<Test>::get().is_some());
+		assert!(PendingChange::<Test>::get().unwrap().forced.is_some());
+		assert_eq!(CurrentSetId::<Test>::get(), 2);
 	});
 }
 
@@ -861,7 +864,7 @@ fn valid_equivocation_reports_dont_pay_fees() {
 
 		let equivocation_key = &Grandpa::grandpa_authorities()[0].0;
 		let equivocation_keyring = extract_keyring(equivocation_key);
-		let set_id = Grandpa::current_set_id();
+		let set_id = CurrentSetId::<Test>::get();
 
 		// generate an equivocation proof.
 		let equivocation_proof = generate_equivocation_proof(
-- 
GitLab


From ddffa027d7b78af330a2d3d18b7dfdbd00e431f0 Mon Sep 17 00:00:00 2001
From: Alin Dima <alin@parity.io>
Date: Tue, 14 Jan 2025 10:40:50 +0200
Subject: [PATCH 046/116] forbid v1 descriptors with UMP signals (#7127)

---
 .../node/core/candidate-validation/src/lib.rs | 15 ++--
 .../core/candidate-validation/src/tests.rs    | 71 +++++++++++++++++--
 polkadot/primitives/src/vstaging/mod.rs       | 30 ++++++--
 prdoc/pr_7127.prdoc                           |  9 +++
 4 files changed, 104 insertions(+), 21 deletions(-)
 create mode 100644 prdoc/pr_7127.prdoc

diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index 25614349486..2a4643031bf 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -912,15 +912,10 @@ async fn validate_candidate_exhaustive(
 					// invalid.
 					Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))
 				} else {
-					let core_index = candidate_receipt.descriptor.core_index();
-
-					match (core_index, exec_kind) {
+					match exec_kind {
 						// Core selectors are optional for V2 descriptors, but we still check the
 						// descriptor core index.
-						(
-							Some(_core_index),
-							PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_),
-						) => {
+						PvfExecKind::Backing(_) | PvfExecKind::BackingSystemParas(_) => {
 							let Some(claim_queue) = maybe_claim_queue else {
 								let error = "cannot fetch the claim queue from the runtime";
 								gum::warn!(
@@ -937,9 +932,9 @@ async fn validate_candidate_exhaustive(
 							{
 								gum::warn!(
 									target: LOG_TARGET,
-									?err,
 									candidate_hash = ?candidate_receipt.hash(),
-									"Candidate core index is invalid",
+									"Candidate core index is invalid: {}",
+									err
 								);
 								return Ok(ValidationResult::Invalid(
 									InvalidCandidate::InvalidCoreIndex,
@@ -947,7 +942,7 @@ async fn validate_candidate_exhaustive(
 							}
 						},
 						// No checks for approvals and disputes
-						(_, _) => {},
+						_ => {},
 					}
 
 					Ok(ValidationResult::Valid(
diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs
index 98e34a1cb4c..795d7c93f8a 100644
--- a/polkadot/node/core/candidate-validation/src/tests.rs
+++ b/polkadot/node/core/candidate-validation/src/tests.rs
@@ -30,8 +30,8 @@ use polkadot_node_subsystem_util::reexports::SubsystemContext;
 use polkadot_overseer::ActivatedLeaf;
 use polkadot_primitives::{
 	vstaging::{
-		CandidateDescriptorV2, ClaimQueueOffset, CoreSelector, MutateDescriptorV2, UMPSignal,
-		UMP_SEPARATOR,
+		CandidateDescriptorV2, CandidateDescriptorVersion, ClaimQueueOffset, CoreSelector,
+		MutateDescriptorV2, UMPSignal, UMP_SEPARATOR,
 	},
 	CandidateDescriptor, CoreIndex, GroupIndex, HeadData, Id as ParaId, OccupiedCoreAssumption,
 	SessionInfo, UpwardMessage, ValidatorId,
@@ -851,7 +851,7 @@ fn invalid_session_or_core_index() {
 	))
 	.unwrap();
 
-	// Validation doesn't fail for approvals, core/session index is not checked.
+	// Validation doesn't fail for disputes, core/session index is not checked.
 	assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => {
 		assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1]));
 		assert_eq!(outputs.upward_messages, commitments.upward_messages);
@@ -911,6 +911,69 @@ fn invalid_session_or_core_index() {
 		assert_eq!(outputs.hrmp_watermark, 0);
 		assert_eq!(used_validation_data, validation_data);
 	});
+
+	// Test that a v1 candidate that outputs the core selector UMP signal is invalid.
+	let descriptor_v1 = make_valid_candidate_descriptor(
+		ParaId::from(1_u32),
+		dummy_hash(),
+		dummy_hash(),
+		pov.hash(),
+		validation_code.hash(),
+		validation_result.head_data.hash(),
+		dummy_hash(),
+		sp_keyring::Sr25519Keyring::Ferdie,
+	);
+	let descriptor: CandidateDescriptorV2 = descriptor_v1.into();
+
+	perform_basic_checks(&descriptor, validation_data.max_pov_size, &pov, &validation_code.hash())
+		.unwrap();
+	assert_eq!(descriptor.version(), CandidateDescriptorVersion::V1);
+	let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: commitments.hash() };
+
+	for exec_kind in
+		[PvfExecKind::Backing(dummy_hash()), PvfExecKind::BackingSystemParas(dummy_hash())]
+	{
+		let result = executor::block_on(validate_candidate_exhaustive(
+			Some(1),
+			MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())),
+			validation_data.clone(),
+			validation_code.clone(),
+			candidate_receipt.clone(),
+			Arc::new(pov.clone()),
+			ExecutorParams::default(),
+			exec_kind,
+			&Default::default(),
+			Some(Default::default()),
+		))
+		.unwrap();
+		assert_matches!(result, ValidationResult::Invalid(InvalidCandidate::InvalidCoreIndex));
+	}
+
+	// Validation doesn't fail for approvals and disputes, core/session index is not checked.
+	for exec_kind in [PvfExecKind::Approval, PvfExecKind::Dispute] {
+		let v = executor::block_on(validate_candidate_exhaustive(
+			Some(1),
+			MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result.clone())),
+			validation_data.clone(),
+			validation_code.clone(),
+			candidate_receipt.clone(),
+			Arc::new(pov.clone()),
+			ExecutorParams::default(),
+			exec_kind,
+			&Default::default(),
+			Default::default(),
+		))
+		.unwrap();
+
+		assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => {
+			assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1]));
+			assert_eq!(outputs.upward_messages, commitments.upward_messages);
+			assert_eq!(outputs.horizontal_messages, Vec::new());
+			assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into()));
+			assert_eq!(outputs.hrmp_watermark, 0);
+			assert_eq!(used_validation_data, validation_data);
+		});
+	}
 }
 
 #[test]
@@ -1407,7 +1470,7 @@ fn compressed_code_works() {
 		ExecutorParams::default(),
 		PvfExecKind::Backing(dummy_hash()),
 		&Default::default(),
-		Default::default(),
+		Some(Default::default()),
 	));
 
 	assert_matches!(v, Ok(ValidationResult::Valid(_, _)));
diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs
index 271f78efe09..c52f3539c3e 100644
--- a/polkadot/primitives/src/vstaging/mod.rs
+++ b/polkadot/primitives/src/vstaging/mod.rs
@@ -505,6 +505,10 @@ pub enum CommittedCandidateReceiptError {
 	/// Currenly only one such message is allowed.
 	#[cfg_attr(feature = "std", error("Too many UMP signals"))]
 	TooManyUMPSignals,
+	/// If the parachain runtime started sending core selectors, v1 descriptors are no longer
+	/// allowed.
+	#[cfg_attr(feature = "std", error("Version 1 receipt does not support core selectors"))]
+	CoreSelectorWithV1Decriptor,
 }
 
 macro_rules! impl_getter {
@@ -603,15 +607,25 @@ impl<H: Copy> CommittedCandidateReceiptV2<H> {
 		&self,
 		cores_per_para: &TransposedClaimQueue,
 	) -> Result<(), CommittedCandidateReceiptError> {
+		let maybe_core_selector = self.commitments.core_selector()?;
+
 		match self.descriptor.version() {
-			// Don't check v1 descriptors.
-			CandidateDescriptorVersion::V1 => return Ok(()),
+			CandidateDescriptorVersion::V1 => {
+				// If the parachain runtime started sending core selectors, v1 descriptors are no
+				// longer allowed.
+				if maybe_core_selector.is_some() {
+					return Err(CommittedCandidateReceiptError::CoreSelectorWithV1Decriptor)
+				} else {
+					// Nothing else to check for v1 descriptors.
+					return Ok(())
+				}
+			},
 			CandidateDescriptorVersion::V2 => {},
 			CandidateDescriptorVersion::Unknown =>
 				return Err(CommittedCandidateReceiptError::UnknownVersion(self.descriptor.version)),
 		}
 
-		let (maybe_core_index_selector, cq_offset) = self.commitments.core_selector()?.map_or_else(
+		let (maybe_core_index_selector, cq_offset) = maybe_core_selector.map_or_else(
 			|| (None, ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET)),
 			|(sel, off)| (Some(sel), off),
 		);
@@ -1207,8 +1221,7 @@ mod tests {
 		assert_eq!(new_ccr.hash(), v2_ccr.hash());
 	}
 
-	// Only check descriptor `core_index` field of v2 descriptors. If it is v1, that field
-	// will be garbage.
+	// V1 descriptors are forbidden once the parachain runtime started sending UMP signals.
 	#[test]
 	fn test_v1_descriptors_with_ump_signal() {
 		let mut ccr = dummy_old_committed_candidate_receipt();
@@ -1234,9 +1247,12 @@ mod tests {
 		cq.insert(CoreIndex(0), vec![v1_ccr.descriptor.para_id()].into());
 		cq.insert(CoreIndex(1), vec![v1_ccr.descriptor.para_id()].into());
 
-		assert!(v1_ccr.check_core_index(&transpose_claim_queue(cq)).is_ok());
-
 		assert_eq!(v1_ccr.descriptor.core_index(), None);
+
+		assert_eq!(
+			v1_ccr.check_core_index(&transpose_claim_queue(cq)),
+			Err(CommittedCandidateReceiptError::CoreSelectorWithV1Decriptor)
+		);
 	}
 
 	#[test]
diff --git a/prdoc/pr_7127.prdoc b/prdoc/pr_7127.prdoc
new file mode 100644
index 00000000000..761ddd04dbe
--- /dev/null
+++ b/prdoc/pr_7127.prdoc
@@ -0,0 +1,9 @@
+title: 'Forbid v1 descriptors with UMP signals'
+doc:
+- audience: [Runtime Dev, Node Dev]
+  description: Adds a check that parachain candidates do not send out UMP signals with v1 descriptors.
+crates:
+- name: polkadot-node-core-candidate-validation
+  bump: minor
+- name: polkadot-primitives
+  bump: major
-- 
GitLab


From f4743b009280e47398790bd85943819540a9ce0a Mon Sep 17 00:00:00 2001
From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Date: Tue, 14 Jan 2025 14:09:01 +0100
Subject: [PATCH 047/116] `fatxpool`: proper handling of priorities when
 mempool is full (#6647)

Higher-priority transactions can now replace lower-priority transactions
even when the internal _tx_mem_pool_ is full.

**Notes for reviewers:**
- The _tx_mem_pool_ now maintains information about transaction
priority. Although _tx_mem_pool_ itself is stateless, transaction
priority is updated after submission to the view. An alternative
approach could involve validating transactions at the `at` block, but
this is computationally expensive. To avoid additional validation
overhead, I opted to use the priority obtained from runtime during
submission to the view. This is the rationale behind introducing the
`SubmitOutcome` struct, which synchronously communicates transaction
priority from the view to the pool. This results in a very brief window
during which the transaction priority remains unknown - those
transactions are not taken into consideration while dropping takes place.
In the future, if needed, we could update transaction priority using
view revalidation results to keep this information fully up-to-date (as
priority of transaction may change with chain-state evolution).
- When _tx_mem_pool_ becomes full (an event anticipated to be rare),
transaction priority must be known to perform priority-based removal. In
such cases, the most recent block known is utilized for validation. I
think that speculative submission to the view and re-using the priority
from this submission would be an unnecessary complication.
- Once the priority is determined, lower-priority transactions whose
cumulative size meets or exceeds the size of the new transaction are
collected to ensure the pool size limit is not exceeded.
- Transaction removed from _tx_mem_pool_ , also needs to be removed from
all the views with appropriate event (which is done by
`remove_transaction_subtree`). To ensure complete removal, the
`PendingTxReplacement` struct was re-factored to more generic
`PendingPreInsertTask` (introduced in #6405) which covers removal and
submission of transactions in the view which may be potentially created in
the background. This is to ensure that removed transaction will not
re-enter to the newly created view.
- `submit_local` implementation was also improved to properly handle
priorities in case when mempool is full. Some missing tests for this
method were also added.

Closes: #5809

---------

Co-authored-by: command-bot <>
Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
---
 prdoc/pr_6647.prdoc                           |   8 +
 .../src/fork_aware_txpool/dropped_watcher.rs  |  18 +-
 .../fork_aware_txpool/fork_aware_txpool.rs    | 238 +++++++++--
 .../src/fork_aware_txpool/tx_mem_pool.rs      | 402 ++++++++++++++++--
 .../src/fork_aware_txpool/view.rs             |  22 +-
 .../src/fork_aware_txpool/view_store.rs       | 261 +++++++++---
 .../transaction-pool/src/graph/base_pool.rs   |  44 +-
 .../transaction-pool/src/graph/listener.rs    |   4 +-
 .../client/transaction-pool/src/graph/mod.rs  |   8 +-
 .../client/transaction-pool/src/graph/pool.rs |  84 ++--
 .../transaction-pool/src/graph/ready.rs       |  10 +-
 .../transaction-pool/src/graph/tracked_map.rs |   5 +
 .../src/graph/validated_pool.rs               | 119 +++++-
 .../src/single_state_txpool/revalidation.rs   |   5 +-
 .../single_state_txpool.rs                    |  30 +-
 .../transaction-pool/tests/fatp_common/mod.rs |  19 +-
 .../transaction-pool/tests/fatp_prios.rs      | 317 +++++++++++++-
 .../client/transaction-pool/tests/pool.rs     |  14 +-
 .../runtime/transaction-pool/src/lib.rs       |  36 +-
 19 files changed, 1393 insertions(+), 251 deletions(-)
 create mode 100644 prdoc/pr_6647.prdoc

diff --git a/prdoc/pr_6647.prdoc b/prdoc/pr_6647.prdoc
new file mode 100644
index 00000000000..47af9924ef1
--- /dev/null
+++ b/prdoc/pr_6647.prdoc
@@ -0,0 +1,8 @@
+title: '`fatxpool`: proper handling of priorities when mempool is full'
+doc:
+- audience: Node Dev
+  description: |-
+    Higher-priority transactions can now replace lower-priority transactions even when the internal _tx_mem_pool_ is full.
+crates:
+- name: sc-transaction-pool
+  bump: minor
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
index d69aa37c94a..bf61558b00b 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs
@@ -53,11 +53,13 @@ pub struct DroppedTransaction<Hash> {
 }
 
 impl<Hash> DroppedTransaction<Hash> {
-	fn new_usurped(tx_hash: Hash, by: Hash) -> Self {
+	/// Creates a new instance with reason set to `DroppedReason::Usurped(by)`.
+	pub fn new_usurped(tx_hash: Hash, by: Hash) -> Self {
 		Self { reason: DroppedReason::Usurped(by), tx_hash }
 	}
 
-	fn new_enforced_by_limts(tx_hash: Hash) -> Self {
+	/// Creates a new instance with reason set to `DroppedReason::LimitsEnforced`.
+	pub fn new_enforced_by_limts(tx_hash: Hash) -> Self {
 		Self { reason: DroppedReason::LimitsEnforced, tx_hash }
 	}
 }
@@ -256,11 +258,13 @@ where
 				self.future_transaction_views.entry(tx_hash).or_default().insert(block_hash);
 			},
 			TransactionStatus::Ready | TransactionStatus::InBlock(..) => {
-				// note: if future transaction was once seens as the ready we may want to treat it
-				// as ready transactions. Unreferenced future transactions are more likely to be
-				// removed when the last referencing view is removed then ready transactions.
-				// Transcaction seen as ready is likely quite close to be included in some
-				// future fork.
+				// note: if future transaction was once seen as the ready we may want to treat it
+				// as ready transaction. The rationale behind this is as follows: we want to remove
+				// unreferenced future transactions when the last referencing view is removed (to
+				// avoid clogging mempool). For ready transactions we prefer to keep them in mempool
+				// even if no view is currently referencing them. Future transaction once seen as
+				// ready is likely quite close to be included in some future fork (it is close to be
+				// ready, so we make exception and treat such transaction as ready).
 				if let Some(mut views) = self.future_transaction_views.remove(&tx_hash) {
 					views.insert(block_hash);
 					self.ready_transaction_views.insert(tx_hash, views);
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
index e57256943cc..76604571825 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs
@@ -31,7 +31,10 @@ use crate::{
 	api::FullChainApi,
 	common::log_xt::log_xt_trace,
 	enactment_state::{EnactmentAction, EnactmentState},
-	fork_aware_txpool::{dropped_watcher::DroppedReason, revalidation_worker},
+	fork_aware_txpool::{
+		dropped_watcher::{DroppedReason, DroppedTransaction},
+		revalidation_worker,
+	},
 	graph::{
 		self,
 		base_pool::{TimedTransactionSource, Transaction},
@@ -49,14 +52,16 @@ use futures::{
 use parking_lot::Mutex;
 use prometheus_endpoint::Registry as PrometheusRegistry;
 use sc_transaction_pool_api::{
-	ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolStatus, TransactionFor,
-	TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash,
+	error::Error as TxPoolApiError, ChainEvent, ImportNotificationStream,
+	MaintainedTransactionPool, PoolStatus, TransactionFor, TransactionPool, TransactionPriority,
+	TransactionSource, TransactionStatusStreamFor, TxHash,
 };
 use sp_blockchain::{HashAndNumber, TreeRoute};
 use sp_core::traits::SpawnEssentialNamed;
 use sp_runtime::{
 	generic::BlockId,
 	traits::{Block as BlockT, NumberFor},
+	transaction_validity::{TransactionValidityError, ValidTransaction},
 };
 use std::{
 	collections::{HashMap, HashSet},
@@ -287,7 +292,7 @@ where
 				DroppedReason::LimitsEnforced => {},
 			};
 
-			mempool.remove_dropped_transaction(&dropped_tx_hash).await;
+			mempool.remove_transaction(&dropped_tx_hash);
 			view_store.listener.transaction_dropped(dropped);
 			import_notification_sink.clean_notified_items(&[dropped_tx_hash]);
 		}
@@ -598,7 +603,7 @@ where
 /// out:
 /// [ Ok(xth0), Ok(xth1), Err ]
 /// ```
-fn reduce_multiview_result<H, E>(input: HashMap<H, Vec<Result<H, E>>>) -> Vec<Result<H, E>> {
+fn reduce_multiview_result<H, D, E>(input: HashMap<H, Vec<Result<D, E>>>) -> Vec<Result<D, E>> {
 	let mut values = input.values();
 	let Some(first) = values.next() else {
 		return Default::default();
@@ -650,9 +655,28 @@ where
 		let mempool_results = self.mempool.extend_unwatched(source, &xts);
 
 		if view_store.is_empty() {
-			return Ok(mempool_results.into_iter().map(|r| r.map(|r| r.hash)).collect::<Vec<_>>())
+			return Ok(mempool_results
+				.into_iter()
+				.map(|r| r.map(|r| r.hash).map_err(Into::into))
+				.collect::<Vec<_>>())
 		}
 
+		// Submit all the transactions to the mempool
+		let retries = mempool_results
+			.into_iter()
+			.zip(xts.clone())
+			.map(|(result, xt)| async move {
+				match result {
+					Err(TxPoolApiError::ImmediatelyDropped) =>
+						self.attempt_transaction_replacement(source, false, xt).await,
+					_ => result,
+				}
+			})
+			.collect::<Vec<_>>();
+
+		let mempool_results = futures::future::join_all(retries).await;
+
+		// Collect transactions that were successfully submitted to the mempool...
 		let to_be_submitted = mempool_results
 			.iter()
 			.zip(xts)
@@ -664,22 +688,47 @@ where
 		self.metrics
 			.report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _));
 
+		// ... and submit them to the view_store. Please note that transactions rejected by mempool
+		// are not sent here.
 		let mempool = self.mempool.clone();
 		let results_map = view_store.submit(to_be_submitted.into_iter()).await;
 		let mut submission_results = reduce_multiview_result(results_map).into_iter();
 
+		// Note for composing final result:
+		//
+		// For each failed insertion into the mempool, the mempool result should be placed into
+		// the returned vector.
+		//
+		// For each successful insertion into the mempool, the corresponding
+		// view_store submission result needs to be examined:
+		// - If there is an error during view_store submission, the transaction is removed from
+		// the mempool, and the final result recorded in the vector for this transaction is the
+		// view_store submission error.
+		//
+		// - If the view_store submission is successful, the transaction priority is updated in the
+		// mempool.
+		//
+		// Finally, it collects the hashes of updated transactions or submission errors (either
+		// from the mempool or view_store) into a returned vector.
 		Ok(mempool_results
 				.into_iter()
 				.map(|result| {
-					result.and_then(|insertion| {
-						submission_results
-							.next()
-							.expect("The number of Ok results in mempool is exactly the same as the size of to-views-submission result. qed.")
-							.inspect_err(|_|
-								mempool.remove(insertion.hash)
-							)
+					result
+						.map_err(Into::into)
+						.and_then(|insertion| {
+							submission_results
+								.next()
+								.expect("The number of Ok results in mempool is exactly the same as the size of view_store submission result. qed.")
+								.inspect_err(|_|{
+									mempool.remove_transaction(&insertion.hash);
+								})
 					})
+
 				})
+				.map(|r| r.map(|r| {
+					mempool.update_transaction_priority(&r);
+					r.hash()
+				}))
 				.collect::<Vec<_>>())
 	}
 
@@ -712,10 +761,13 @@ where
 	) -> Result<Pin<Box<TransactionStatusStreamFor<Self>>>, Self::Error> {
 		log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count());
 		let xt = Arc::from(xt);
-		let InsertionInfo { hash: xt_hash, source: timed_source } =
+
+		let InsertionInfo { hash: xt_hash, source: timed_source, .. } =
 			match self.mempool.push_watched(source, xt.clone()) {
 				Ok(result) => result,
-				Err(e) => return Err(e),
+				Err(TxPoolApiError::ImmediatelyDropped) =>
+					self.attempt_transaction_replacement(source, true, xt.clone()).await?,
+				Err(e) => return Err(e.into()),
 			};
 
 		self.metrics.report(|metrics| metrics.submitted_transactions.inc());
@@ -723,7 +775,13 @@ where
 		self.view_store
 			.submit_and_watch(at, timed_source, xt)
 			.await
-			.inspect_err(|_| self.mempool.remove(xt_hash))
+			.inspect_err(|_| {
+				self.mempool.remove_transaction(&xt_hash);
+			})
+			.map(|mut outcome| {
+				self.mempool.update_transaction_priority(&outcome);
+				outcome.expect_watcher()
+			})
 	}
 
 	/// Intended to remove transactions identified by the given hashes, and any dependent
@@ -828,22 +886,16 @@ where
 	}
 }
 
-impl<Block, Client> sc_transaction_pool_api::LocalTransactionPool
-	for ForkAwareTxPool<FullChainApi<Client, Block>, Block>
+impl<ChainApi, Block> sc_transaction_pool_api::LocalTransactionPool
+	for ForkAwareTxPool<ChainApi, Block>
 where
 	Block: BlockT,
+	ChainApi: 'static + graph::ChainApi<Block = Block>,
 	<Block as BlockT>::Hash: Unpin,
-	Client: sp_api::ProvideRuntimeApi<Block>
-		+ sc_client_api::BlockBackend<Block>
-		+ sc_client_api::blockchain::HeaderBackend<Block>
-		+ sp_runtime::traits::BlockIdTo<Block>
-		+ sp_blockchain::HeaderMetadata<Block, Error = sp_blockchain::Error>,
-	Client: Send + Sync + 'static,
-	Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>,
 {
 	type Block = Block;
-	type Hash = ExtrinsicHash<FullChainApi<Client, Block>>;
-	type Error = <FullChainApi<Client, Block> as graph::ChainApi>::Error;
+	type Hash = ExtrinsicHash<ChainApi>;
+	type Error = ChainApi::Error;
 
 	fn submit_local(
 		&self,
@@ -852,12 +904,29 @@ where
 	) -> Result<Self::Hash, Self::Error> {
 		log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count());
 		let xt = Arc::from(xt);
-		let InsertionInfo { hash: xt_hash, .. } = self
-			.mempool
-			.extend_unwatched(TransactionSource::Local, &[xt.clone()])
-			.remove(0)?;
 
-		self.view_store.submit_local(xt).or_else(|_| Ok(xt_hash))
+		let result =
+			self.mempool.extend_unwatched(TransactionSource::Local, &[xt.clone()]).remove(0);
+
+		let insertion = match result {
+			Err(TxPoolApiError::ImmediatelyDropped) => self.attempt_transaction_replacement_sync(
+				TransactionSource::Local,
+				false,
+				xt.clone(),
+			),
+			_ => result,
+		}?;
+
+		self.view_store
+			.submit_local(xt)
+			.inspect_err(|_| {
+				self.mempool.remove_transaction(&insertion.hash);
+			})
+			.map(|outcome| {
+				self.mempool.update_transaction_priority(&outcome);
+				outcome.hash()
+			})
+			.or_else(|_| Ok(insertion.hash))
 	}
 }
 
@@ -1109,7 +1178,11 @@ where
 			.await
 			.into_iter()
 			.zip(hashes)
-			.map(|(result, tx_hash)| result.or_else(|_| Err(tx_hash)))
+			.map(|(result, tx_hash)| {
+				result
+					.map(|outcome| self.mempool.update_transaction_priority(&outcome.into()))
+					.or_else(|_| Err(tx_hash))
+			})
 			.collect::<Vec<_>>();
 
 		let submitted_count = watched_results.len();
@@ -1131,7 +1204,7 @@ where
 			for result in watched_results {
 				if let Err(tx_hash) = result {
 					self.view_store.listener.invalidate_transactions(&[tx_hash]);
-					self.mempool.remove(tx_hash);
+					self.mempool.remove_transaction(&tx_hash);
 				}
 			}
 		}
@@ -1263,6 +1336,101 @@ where
 	fn tx_hash(&self, xt: &TransactionFor<Self>) -> TxHash<Self> {
 		self.api.hash_and_length(xt).0
 	}
+
+	/// Attempts to find and replace a lower-priority transaction in the transaction pool with a new
+	/// one.
+	///
+	/// This asynchronous function verifies the new transaction against the most recent view. If a
+	/// transaction with a lower priority exists in the transaction pool, it is replaced with the
+	/// new transaction.
+	///
+	/// If no lower-priority transaction is found, the function returns an error indicating the
+	/// transaction was dropped immediately.
+	async fn attempt_transaction_replacement(
+		&self,
+		source: TransactionSource,
+		watched: bool,
+		xt: ExtrinsicFor<ChainApi>,
+	) -> Result<InsertionInfo<ExtrinsicHash<ChainApi>>, TxPoolApiError> {
+		let at = self
+			.view_store
+			.most_recent_view
+			.read()
+			.ok_or(TxPoolApiError::ImmediatelyDropped)?;
+
+		let (best_view, _) = self
+			.view_store
+			.get_view_at(at, false)
+			.ok_or(TxPoolApiError::ImmediatelyDropped)?;
+
+		let (xt_hash, validated_tx) = best_view
+			.pool
+			.verify_one(
+				best_view.at.hash,
+				best_view.at.number,
+				TimedTransactionSource::from_transaction_source(source, false),
+				xt.clone(),
+				crate::graph::CheckBannedBeforeVerify::Yes,
+			)
+			.await;
+
+		let Some(priority) = validated_tx.priority() else {
+			return Err(TxPoolApiError::ImmediatelyDropped)
+		};
+
+		self.attempt_transaction_replacement_inner(xt, xt_hash, priority, source, watched)
+	}
+
+	/// Sync version of [`Self::attempt_transaction_replacement`].
+	fn attempt_transaction_replacement_sync(
+		&self,
+		source: TransactionSource,
+		watched: bool,
+		xt: ExtrinsicFor<ChainApi>,
+	) -> Result<InsertionInfo<ExtrinsicHash<ChainApi>>, TxPoolApiError> {
+		let at = self
+			.view_store
+			.most_recent_view
+			.read()
+			.ok_or(TxPoolApiError::ImmediatelyDropped)?;
+
+		let ValidTransaction { priority, .. } = self
+			.api
+			.validate_transaction_blocking(at, TransactionSource::Local, Arc::from(xt.clone()))
+			.map_err(|_| TxPoolApiError::ImmediatelyDropped)?
+			.map_err(|e| match e {
+				TransactionValidityError::Invalid(i) => TxPoolApiError::InvalidTransaction(i),
+				TransactionValidityError::Unknown(u) => TxPoolApiError::UnknownTransaction(u),
+			})?;
+		let xt_hash = self.hash_of(&xt);
+		self.attempt_transaction_replacement_inner(xt, xt_hash, priority, source, watched)
+	}
+
+	fn attempt_transaction_replacement_inner(
+		&self,
+		xt: ExtrinsicFor<ChainApi>,
+		tx_hash: ExtrinsicHash<ChainApi>,
+		priority: TransactionPriority,
+		source: TransactionSource,
+		watched: bool,
+	) -> Result<InsertionInfo<ExtrinsicHash<ChainApi>>, TxPoolApiError> {
+		let insertion_info =
+			self.mempool.try_insert_with_replacement(xt, priority, source, watched)?;
+
+		for worst_hash in &insertion_info.removed {
+			log::trace!(target: LOG_TARGET, "removed: {worst_hash:?} replaced by {tx_hash:?}");
+			self.view_store
+				.listener
+				.transaction_dropped(DroppedTransaction::new_enforced_by_limts(*worst_hash));
+
+			self.view_store
+				.remove_transaction_subtree(*worst_hash, |listener, removed_tx_hash| {
+					listener.limits_enforced(&removed_tx_hash);
+				});
+		}
+
+		return Ok(insertion_info)
+	}
 }
 
 #[async_trait]
@@ -1410,7 +1578,7 @@ mod reduce_multiview_result_tests {
 	fn empty() {
 		sp_tracing::try_init_simple();
 		let input = HashMap::default();
-		let r = reduce_multiview_result::<H256, Error>(input);
+		let r = reduce_multiview_result::<H256, H256, Error>(input);
 		assert!(r.is_empty());
 	}
 
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs
index 989ae4425dc..c8a4d0c72dd 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs
@@ -26,7 +26,10 @@
 //!   it), while on other forks tx can be valid. Depending on which view is chosen to be cloned,
 //!   such transaction could not be present in the newly created view.
 
-use super::{metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener};
+use super::{
+	metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener,
+	view_store::ViewStoreSubmitOutcome,
+};
 use crate::{
 	common::log_xt::log_xt_trace,
 	graph,
@@ -35,15 +38,20 @@ use crate::{
 };
 use futures::FutureExt;
 use itertools::Itertools;
-use sc_transaction_pool_api::TransactionSource;
+use parking_lot::RwLock;
+use sc_transaction_pool_api::{TransactionPriority, TransactionSource};
 use sp_blockchain::HashAndNumber;
 use sp_runtime::{
 	traits::Block as BlockT,
 	transaction_validity::{InvalidTransaction, TransactionValidityError},
 };
 use std::{
+	cmp::Ordering,
 	collections::HashMap,
-	sync::{atomic, atomic::AtomicU64, Arc},
+	sync::{
+		atomic::{self, AtomicU64},
+		Arc,
+	},
 	time::Instant,
 };
 
@@ -77,6 +85,9 @@ where
 	source: TimedTransactionSource,
 	/// When the transaction was revalidated, used to periodically revalidate the mem pool buffer.
 	validated_at: AtomicU64,
+	/// Priority of transaction at some block. It is assumed it will not be changed often. None if
+	/// not known.
+	priority: RwLock<Option<TransactionPriority>>,
 	//todo: we need to add future / ready status at finalized block.
 	//If future transactions are stuck in tx_mem_pool (due to limits being hit), we need a means
 	// to replace them somehow with newly coming transactions.
@@ -101,23 +112,50 @@ where
 
 	/// Creates a new instance of wrapper for unwatched transaction.
 	fn new_unwatched(source: TransactionSource, tx: ExtrinsicFor<ChainApi>, bytes: usize) -> Self {
-		Self {
-			watched: false,
-			tx,
-			source: TimedTransactionSource::from_transaction_source(source, true),
-			validated_at: AtomicU64::new(0),
-			bytes,
-		}
+		Self::new(false, source, tx, bytes)
 	}
 
 	/// Creates a new instance of wrapper for watched transaction.
 	fn new_watched(source: TransactionSource, tx: ExtrinsicFor<ChainApi>, bytes: usize) -> Self {
+		Self::new(true, source, tx, bytes)
+	}
+
+	/// Creates a new instance of wrapper for a transaction with no priority.
+	fn new(
+		watched: bool,
+		source: TransactionSource,
+		tx: ExtrinsicFor<ChainApi>,
+		bytes: usize,
+	) -> Self {
+		Self::new_with_optional_priority(watched, source, tx, bytes, None)
+	}
+
+	/// Creates a new instance of wrapper for a transaction with given priority.
+	fn new_with_priority(
+		watched: bool,
+		source: TransactionSource,
+		tx: ExtrinsicFor<ChainApi>,
+		bytes: usize,
+		priority: TransactionPriority,
+	) -> Self {
+		Self::new_with_optional_priority(watched, source, tx, bytes, Some(priority))
+	}
+
+	/// Creates a new instance of wrapper for a transaction with optional priority.
+	fn new_with_optional_priority(
+		watched: bool,
+		source: TransactionSource,
+		tx: ExtrinsicFor<ChainApi>,
+		bytes: usize,
+		priority: Option<TransactionPriority>,
+	) -> Self {
 		Self {
-			watched: true,
+			watched,
 			tx,
 			source: TimedTransactionSource::from_transaction_source(source, true),
 			validated_at: AtomicU64::new(0),
 			bytes,
+			priority: priority.into(),
 		}
 	}
 
@@ -132,6 +170,11 @@ where
 	pub(crate) fn source(&self) -> TimedTransactionSource {
 		self.source.clone()
 	}
+
+	/// Returns the priority of the transaction.
+	pub(crate) fn priority(&self) -> Option<TransactionPriority> {
+		*self.priority.read()
+	}
 }
 
 impl<ChainApi, Block> Size for Arc<TxInMemPool<ChainApi, Block>>
@@ -191,11 +234,15 @@ where
 pub(super) struct InsertionInfo<Hash> {
 	pub(super) hash: Hash,
 	pub(super) source: TimedTransactionSource,
+	pub(super) removed: Vec<Hash>,
 }
 
 impl<Hash> InsertionInfo<Hash> {
 	fn new(hash: Hash, source: TimedTransactionSource) -> Self {
-		Self { hash, source }
+		Self::new_with_removed(hash, source, Default::default())
+	}
+	fn new_with_removed(hash: Hash, source: TimedTransactionSource, removed: Vec<Hash>) -> Self {
+		Self { hash, source, removed }
 	}
 }
 
@@ -279,27 +326,109 @@ where
 		&self,
 		hash: ExtrinsicHash<ChainApi>,
 		tx: TxInMemPool<ChainApi, Block>,
-	) -> Result<InsertionInfo<ExtrinsicHash<ChainApi>>, ChainApi::Error> {
-		let bytes = self.transactions.bytes();
+	) -> Result<InsertionInfo<ExtrinsicHash<ChainApi>>, sc_transaction_pool_api::error::Error> {
 		let mut transactions = self.transactions.write();
+
+		let bytes = self.transactions.bytes();
+
 		let result = match (
-			!self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes),
+			self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes),
 			transactions.contains_key(&hash),
 		) {
-			(true, false) => {
+			(false, false) => {
 				let source = tx.source();
 				transactions.insert(hash, Arc::from(tx));
 				Ok(InsertionInfo::new(hash, source))
 			},
 			(_, true) =>
-				Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)).into()),
-			(false, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped.into()),
+				Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))),
+			(true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped),
 		};
 		log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash));
 
 		result
 	}
 
+	/// Attempts to insert a new transaction in the memory pool and drop some worse existing
+	/// transactions.
+	///
+	/// A "worse" transaction means transaction with lower priority, or older transaction with the
+	/// same prio.
+	///
+	/// This operation will not overflow the limit of the mempool. It means that cumulative
+	/// size of removed transactions will be equal to (or greater than) the size of the newly
+	/// inserted transaction.
+	///
+	/// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully
+	/// inserted; otherwise, returns an appropriate error indicating the failure.
+	pub(super) fn try_insert_with_replacement(
+		&self,
+		new_tx: ExtrinsicFor<ChainApi>,
+		priority: TransactionPriority,
+		source: TransactionSource,
+		watched: bool,
+	) -> Result<InsertionInfo<ExtrinsicHash<ChainApi>>, sc_transaction_pool_api::error::Error> {
+		let (hash, length) = self.api.hash_and_length(&new_tx);
+		let new_tx = TxInMemPool::new_with_priority(watched, source, new_tx, length, priority);
+		if new_tx.bytes > self.max_transactions_total_bytes {
+			return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped);
+		}
+
+		let mut transactions = self.transactions.write();
+
+		if transactions.contains_key(&hash) {
+			return Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash)));
+		}
+
+		let mut sorted = transactions
+			.iter()
+			.filter_map(|(h, v)| v.priority().map(|_| (*h, v.clone())))
+			.collect::<Vec<_>>();
+
+		// When pushing higher prio transaction, we need to find a number of lower prio txs, such
+		// that the sum of their bytes is greater than or equal to the size of the new tx.
+		// Otherwise we could overflow size limits. Naive way to do it - rev-sort by priority and
+		// eat the tail.
+
+		// reverse (oldest, lowest prio last)
+		sorted.sort_by(|(_, a), (_, b)| match b.priority().cmp(&a.priority()) {
+			Ordering::Equal => match (a.source.timestamp, b.source.timestamp) {
+				(Some(a), Some(b)) => b.cmp(&a),
+				_ => Ordering::Equal,
+			},
+			ordering => ordering,
+		});
+
+		let mut total_size_removed = 0usize;
+		let mut to_be_removed = vec![];
+		let free_bytes = self.max_transactions_total_bytes - self.transactions.bytes();
+
+		loop {
+			let Some((worst_hash, worst_tx)) = sorted.pop() else {
+				return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped);
+			};
+
+			if worst_tx.priority() >= new_tx.priority() {
+				return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped);
+			}
+
+			total_size_removed += worst_tx.bytes;
+			to_be_removed.push(worst_hash);
+
+			if free_bytes + total_size_removed >= new_tx.bytes {
+				break;
+			}
+		}
+
+		let source = new_tx.source();
+		transactions.insert(hash, Arc::from(new_tx));
+		for worst_hash in &to_be_removed {
+			transactions.remove(worst_hash);
+		}
+		debug_assert!(!self.is_limit_exceeded(transactions.len(), self.transactions.bytes()));
+
+		Ok(InsertionInfo::new_with_removed(hash, source, to_be_removed))
+	}
+
 	/// Adds a new unwatched transactions to the internal buffer not exceeding the limit.
 	///
 	/// Returns the vector of results for each transaction, the order corresponds to the input
@@ -308,7 +437,8 @@ where
 		&self,
 		source: TransactionSource,
 		xts: &[ExtrinsicFor<ChainApi>],
-	) -> Vec<Result<InsertionInfo<ExtrinsicHash<ChainApi>>, ChainApi::Error>> {
+	) -> Vec<Result<InsertionInfo<ExtrinsicHash<ChainApi>>, sc_transaction_pool_api::error::Error>>
+	{
 		let result = xts
 			.iter()
 			.map(|xt| {
@@ -325,20 +455,11 @@ where
 		&self,
 		source: TransactionSource,
 		xt: ExtrinsicFor<ChainApi>,
-	) -> Result<InsertionInfo<ExtrinsicHash<ChainApi>>, ChainApi::Error> {
+	) -> Result<InsertionInfo<ExtrinsicHash<ChainApi>>, sc_transaction_pool_api::error::Error> {
 		let (hash, length) = self.api.hash_and_length(&xt);
 		self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length))
 	}
 
-	/// Removes transaction from the memory pool which are specified by the given list of hashes.
-	pub(super) async fn remove_dropped_transaction(
-		&self,
-		dropped: &ExtrinsicHash<ChainApi>,
-	) -> Option<Arc<TxInMemPool<ChainApi, Block>>> {
-		log::debug!(target: LOG_TARGET, "[{:?}] mempool::remove_dropped_transaction", dropped);
-		self.transactions.write().remove(dropped)
-	}
-
 	/// Clones and returns a `HashMap` of references to all unwatched transactions in the memory
 	/// pool.
 	pub(super) fn clone_unwatched(
@@ -362,9 +483,13 @@ where
 			.collect::<HashMap<_, _>>()
 	}
 
-	/// Removes a transaction from the memory pool based on a given hash.
-	pub(super) fn remove(&self, hash: ExtrinsicHash<ChainApi>) {
-		let _ = self.transactions.write().remove(&hash);
+	/// Removes a transaction with given hash from the memory pool.
+	pub(super) fn remove_transaction(
+		&self,
+		hash: &ExtrinsicHash<ChainApi>,
+	) -> Option<Arc<TxInMemPool<ChainApi, Block>>> {
+		log::debug!(target: LOG_TARGET, "[{hash:?}] mempool::remove_transaction");
+		self.transactions.write().remove(hash)
 	}
 
 	/// Revalidates a batch of transactions against the provided finalized block.
@@ -462,6 +587,17 @@ where
 		});
 		self.listener.invalidate_transactions(&invalid_hashes);
 	}
+
+	/// Updates the priority of transaction stored in mempool using provided view_store submission
+	/// outcome.
+	pub(super) fn update_transaction_priority(&self, outcome: &ViewStoreSubmitOutcome<ChainApi>) {
+		outcome.priority().map(|priority| {
+			self.transactions
+				.write()
+				.get_mut(&outcome.hash())
+				.map(|p| *p.priority.write() = Some(priority))
+		});
+	}
 }
 
 #[cfg(test)]
@@ -583,6 +719,9 @@ mod tx_mem_pool_tests {
 		assert_eq!(mempool.unwatched_and_watched_count(), (10, 5));
 	}
 
+	/// size of large extrinsic
+	const LARGE_XT_SIZE: usize = 1129;
+
 	fn large_uxt(x: usize) -> Extrinsic {
 		ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build()
 	}
@@ -592,8 +731,7 @@ mod tx_mem_pool_tests {
 		sp_tracing::try_init_simple();
 		let max = 10;
 		let api = Arc::from(TestApi::default());
-		//size of large extrinsic is: 1129
-		let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * 1129);
+		let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE);
 
 		let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::<Vec<_>>();
 
@@ -617,4 +755,200 @@ mod tx_mem_pool_tests {
 			sc_transaction_pool_api::error::Error::ImmediatelyDropped
 		));
 	}
+
+	#[test]
+	fn replacing_txs_works_for_same_tx_size() {
+		sp_tracing::try_init_simple();
+		let max = 10;
+		let api = Arc::from(TestApi::default());
+		let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE);
+
+		let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::<Vec<_>>();
+
+		let low_prio = 0u64;
+		let hi_prio = u64::MAX;
+
+		let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1);
+		let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts
+			.iter()
+			.map(|t| {
+				let h = api.hash_and_length(t).0;
+				(ViewStoreSubmitOutcome::new(h, Some(low_prio)), h)
+			})
+			.unzip();
+
+		let results = mempool.extend_unwatched(TransactionSource::External, &xts);
+		assert!(results.iter().all(Result::is_ok));
+		assert_eq!(mempool.bytes(), total_xts_bytes);
+
+		submit_outcomes
+			.into_iter()
+			.for_each(|o| mempool.update_transaction_priority(&o));
+
+		let xt = Arc::from(large_uxt(98));
+		let hash = api.hash_and_length(&xt).0;
+		let result = mempool
+			.try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false)
+			.unwrap();
+
+		assert_eq!(result.hash, hash);
+		assert_eq!(result.removed, hashes[0..1]);
+	}
+
+	#[test]
+	fn replacing_txs_removes_proper_size_of_txs() {
+		sp_tracing::try_init_simple();
+		let max = 10;
+		let api = Arc::from(TestApi::default());
+		let mempool = TxMemPool::new_test(api.clone(), usize::MAX, max * LARGE_XT_SIZE);
+
+		let xts = (0..max).map(|x| Arc::from(large_uxt(x))).collect::<Vec<_>>();
+
+		let low_prio = 0u64;
+		let hi_prio = u64::MAX;
+
+		let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1);
+		let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts
+			.iter()
+			.map(|t| {
+				let h = api.hash_and_length(t).0;
+				(ViewStoreSubmitOutcome::new(h, Some(low_prio)), h)
+			})
+			.unzip();
+
+		let results = mempool.extend_unwatched(TransactionSource::External, &xts);
+		assert!(results.iter().all(Result::is_ok));
+		assert_eq!(mempool.bytes(), total_xts_bytes);
+		assert_eq!(total_xts_bytes, max * LARGE_XT_SIZE);
+
+		submit_outcomes
+			.into_iter()
+			.for_each(|o| mempool.update_transaction_priority(&o));
+
+		//this one should drop 2 xts (size: 1130):
+		let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 1025]).build());
+		let (hash, length) = api.hash_and_length(&xt);
+		assert_eq!(length, 1130);
+		let result = mempool
+			.try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false)
+			.unwrap();
+
+		assert_eq!(result.hash, hash);
+		assert_eq!(result.removed, hashes[0..2]);
+	}
+
+	#[test]
+	fn replacing_txs_removes_proper_size_and_prios() {
+		sp_tracing::try_init_simple();
+		const COUNT: usize = 10;
+		let api = Arc::from(TestApi::default());
+		let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE);
+
+		let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::<Vec<_>>();
+
+		let hi_prio = u64::MAX;
+
+		let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1);
+		let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts
+			.iter()
+			.enumerate()
+			.map(|(prio, t)| {
+				let h = api.hash_and_length(t).0;
+				(ViewStoreSubmitOutcome::new(h, Some((COUNT - prio).try_into().unwrap())), h)
+			})
+			.unzip();
+
+		let results = mempool.extend_unwatched(TransactionSource::External, &xts);
+		assert!(results.iter().all(Result::is_ok));
+		assert_eq!(mempool.bytes(), total_xts_bytes);
+
+		submit_outcomes
+			.into_iter()
+			.for_each(|o| mempool.update_transaction_priority(&o));
+
+		//this one should drop 3 xts (each of size 1129)
+		let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build());
+		let (hash, length) = api.hash_and_length(&xt);
+		// overhead is 105, thus length: 105 + 2154
+		assert_eq!(length, 2 * LARGE_XT_SIZE + 1);
+		let result = mempool
+			.try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false)
+			.unwrap();
+
+		assert_eq!(result.hash, hash);
+		assert!(result.removed.iter().eq(hashes[COUNT - 3..COUNT].iter().rev()));
+	}
+
+	#[test]
+	fn replacing_txs_skips_lower_prio_tx() {
+		sp_tracing::try_init_simple();
+		const COUNT: usize = 10;
+		let api = Arc::from(TestApi::default());
+		let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE);
+
+		let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::<Vec<_>>();
+
+		let hi_prio = 100u64;
+		let low_prio = 10u64;
+
+		let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1);
+		let submit_outcomes = xts
+			.iter()
+			.map(|t| {
+				let h = api.hash_and_length(t).0;
+				ViewStoreSubmitOutcome::new(h, Some(hi_prio))
+			})
+			.collect::<Vec<_>>();
+
+		let results = mempool.extend_unwatched(TransactionSource::External, &xts);
+		assert!(results.iter().all(Result::is_ok));
+		assert_eq!(mempool.bytes(), total_xts_bytes);
+
+		submit_outcomes
+			.into_iter()
+			.for_each(|o| mempool.update_transaction_priority(&o));
+
+		let xt = Arc::from(large_uxt(98));
+		let result =
+			mempool.try_insert_with_replacement(xt, low_prio, TransactionSource::External, false);
+
+		// lower prio tx is rejected immediately
+		assert!(matches!(
+			result.unwrap_err(),
+			sc_transaction_pool_api::error::Error::ImmediatelyDropped
+		));
+	}
+
+	#[test]
+	fn replacing_txs_is_skipped_if_prios_are_not_set() {
+		sp_tracing::try_init_simple();
+		const COUNT: usize = 10;
+		let api = Arc::from(TestApi::default());
+		let mempool = TxMemPool::new_test(api.clone(), usize::MAX, COUNT * LARGE_XT_SIZE);
+
+		let xts = (0..COUNT).map(|x| Arc::from(large_uxt(x))).collect::<Vec<_>>();
+
+		let hi_prio = u64::MAX;
+
+		let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1);
+
+		let results = mempool.extend_unwatched(TransactionSource::External, &xts);
+		assert!(results.iter().all(Result::is_ok));
+		assert_eq!(mempool.bytes(), total_xts_bytes);
+
+		//this one could drop 3 xts (each of size 1129)
+		let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build());
+		let length = api.hash_and_length(&xt).1;
+		// overhead is 105, thus length: 105 + 2154
+		assert_eq!(length, 2 * LARGE_XT_SIZE + 1);
+
+		let result =
+			mempool.try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false);
+
+		// we did not update priorities (update_transaction_priority was not called):
+		assert!(matches!(
+			result.unwrap_err(),
+			sc_transaction_pool_api::error::Error::ImmediatelyDropped
+		));
+	}
 }
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs
index 3cbb8fa4871..a35d68120a3 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs
@@ -28,7 +28,7 @@ use crate::{
 	common::log_xt::log_xt_trace,
 	graph::{
 		self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash,
-		IsValidator, ValidatedTransaction, ValidatedTransactionFor,
+		IsValidator, ValidatedPoolSubmitOutcome, ValidatedTransaction, ValidatedTransactionFor,
 	},
 	LOG_TARGET,
 };
@@ -158,7 +158,7 @@ where
 	pub(super) async fn submit_many(
 		&self,
 		xts: impl IntoIterator<Item = (TimedTransactionSource, ExtrinsicFor<ChainApi>)>,
-	) -> Vec<Result<ExtrinsicHash<ChainApi>, ChainApi::Error>> {
+	) -> Vec<Result<ValidatedPoolSubmitOutcome<ChainApi>, ChainApi::Error>> {
 		if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) {
 			let xts = xts.into_iter().collect::<Vec<_>>();
 			log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash);
@@ -173,7 +173,7 @@ where
 		&self,
 		source: TimedTransactionSource,
 		xt: ExtrinsicFor<ChainApi>,
-	) -> Result<Watcher<ExtrinsicHash<ChainApi>, ExtrinsicHash<ChainApi>>, ChainApi::Error> {
+	) -> Result<ValidatedPoolSubmitOutcome<ChainApi>, ChainApi::Error> {
 		log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash);
 		self.pool.submit_and_watch(&self.at, source, xt).await
 	}
@@ -182,7 +182,7 @@ where
 	pub(super) fn submit_local(
 		&self,
 		xt: ExtrinsicFor<ChainApi>,
-	) -> Result<ExtrinsicHash<ChainApi>, ChainApi::Error> {
+	) -> Result<ValidatedPoolSubmitOutcome<ChainApi>, ChainApi::Error> {
 		let (hash, length) = self.pool.validated_pool().api().hash_and_length(&xt);
 		log::trace!(target: LOG_TARGET, "[{:?}] view::submit_local at:{}", hash, self.at.hash);
 
@@ -460,4 +460,18 @@ where
 		const IGNORE_BANNED: bool = false;
 		self.pool.validated_pool().check_is_known(tx_hash, IGNORE_BANNED).is_err()
 	}
+
+	/// Removes the whole transaction subtree from the inner pool.
+	///
+	/// Refer to [`crate::graph::ValidatedPool::remove_subtree`] for more details.
+	pub fn remove_subtree<F>(
+		&self,
+		tx_hash: ExtrinsicHash<ChainApi>,
+		listener_action: F,
+	) -> Vec<ExtrinsicHash<ChainApi>>
+	where
+		F: Fn(&mut crate::graph::Listener<ChainApi>, ExtrinsicHash<ChainApi>),
+	{
+		self.pool.validated_pool().remove_subtree(tx_hash, listener_action)
+	}
 }
diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs
index a06c051f0a7..43ed5bbf886 100644
--- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs
+++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs
@@ -27,7 +27,7 @@ use crate::{
 	graph::{
 		self,
 		base_pool::{TimedTransactionSource, Transaction},
-		ExtrinsicFor, ExtrinsicHash, TransactionFor,
+		BaseSubmitOutcome, ExtrinsicFor, ExtrinsicHash, TransactionFor, ValidatedPoolSubmitOutcome,
 	},
 	ReadyIteratorFor, LOG_TARGET,
 };
@@ -38,20 +38,18 @@ use sc_transaction_pool_api::{error::Error as PoolError, PoolStatus};
 use sp_blockchain::TreeRoute;
 use sp_runtime::{generic::BlockId, traits::Block as BlockT};
 use std::{
-	collections::{hash_map::Entry, HashMap},
+	collections::{hash_map::Entry, HashMap, HashSet},
 	sync::Arc,
 	time::Instant,
 };
 
-/// Helper struct to keep the context for transaction replacements.
+/// Helper struct to maintain the context for pending transaction submission, executed for
+/// newly inserted views.
 #[derive(Clone)]
-struct PendingTxReplacement<ChainApi>
+struct PendingTxSubmission<ChainApi>
 where
 	ChainApi: graph::ChainApi,
 {
-	/// Indicates if the new transaction was already submitted to all the views in the view_store.
-	/// If true, it can be removed after inserting any new view.
-	processed: bool,
 	/// New transaction replacing the old one.
 	xt: ExtrinsicFor<ChainApi>,
 	/// Source of the transaction.
@@ -60,13 +58,84 @@ where
 	watched: bool,
 }
 
-impl<ChainApi> PendingTxReplacement<ChainApi>
+/// Helper type representing the callback allowing to trigger per-transaction events on
+/// `ValidatedPool`'s listener.
+type RemovalListener<ChainApi> =
+	Arc<dyn Fn(&mut crate::graph::Listener<ChainApi>, ExtrinsicHash<ChainApi>) + Send + Sync>;
+
+/// Helper struct to maintain the context for pending transaction removal, executed for
+/// newly inserted views.
+struct PendingTxRemoval<ChainApi>
+where
+	ChainApi: graph::ChainApi,
+{
+	/// Hash of the transaction that will be removed,
+	xt_hash: ExtrinsicHash<ChainApi>,
+	/// Action that shall be executed on underlying `ValidatedPool`'s listener.
+	listener_action: RemovalListener<ChainApi>,
+}
+
+/// This enum represents an action that should be executed on the newly built
+/// view before this view is inserted into the view store.
+enum PreInsertAction<ChainApi>
+where
+	ChainApi: graph::ChainApi,
+{
+	/// Represents the action of submitting a new transaction. Intended to use to handle usurped
+	/// transactions.
+	SubmitTx(PendingTxSubmission<ChainApi>),
+
+	/// Represents the action of removing a subtree of transactions.
+	RemoveSubtree(PendingTxRemoval<ChainApi>),
+}
+
+/// Represents a task awaiting execution, to be performed immediately prior to the view insertion
+/// into the view store.
+struct PendingPreInsertTask<ChainApi>
+where
+	ChainApi: graph::ChainApi,
+{
+	/// The action to be applied when inserting a new view.
+	action: PreInsertAction<ChainApi>,
+	/// Indicates if the action was already applied to all the views in the view_store.
+	/// If true, it can be removed after inserting any new view.
+	processed: bool,
+}
+
+impl<ChainApi> PendingPreInsertTask<ChainApi>
 where
 	ChainApi: graph::ChainApi,
 {
-	/// Creates new unprocessed instance of pending transaction replacement.
-	fn new(xt: ExtrinsicFor<ChainApi>, source: TimedTransactionSource, watched: bool) -> Self {
-		Self { processed: false, xt, source, watched }
+	/// Creates new unprocessed instance of pending transaction submission.
+	fn new_submission_action(
+		xt: ExtrinsicFor<ChainApi>,
+		source: TimedTransactionSource,
+		watched: bool,
+	) -> Self {
+		Self {
+			processed: false,
+			action: PreInsertAction::SubmitTx(PendingTxSubmission { xt, source, watched }),
+		}
+	}
+
+	/// Creates new unprocessed instance of pending transaction removal.
+	fn new_removal_action(
+		xt_hash: ExtrinsicHash<ChainApi>,
+		listener: RemovalListener<ChainApi>,
+	) -> Self {
+		Self {
+			processed: false,
+			action: PreInsertAction::RemoveSubtree(PendingTxRemoval {
+				xt_hash,
+				listener_action: listener,
+			}),
+		}
+	}
+
+	/// Marks a task as done for every view present in view store. Basically means that can be
+	/// removed on new view insertion.
+	fn mark_processed(&mut self) {
+		self.processed = true;
 	}
 }
 
@@ -100,9 +169,20 @@ where
 	/// notifcication threads. It is meant to assure that replaced transaction is also removed from
 	/// newly built views in maintain process.
 	///
-	/// The map's key is hash of replaced extrinsic.
-	pending_txs_replacements:
-		RwLock<HashMap<ExtrinsicHash<ChainApi>, PendingTxReplacement<ChainApi>>>,
+	/// The map's key is hash of actionable extrinsic (to avoid duplicated entries).
+	pending_txs_tasks: RwLock<HashMap<ExtrinsicHash<ChainApi>, PendingPreInsertTask<ChainApi>>>,
+}
+
+/// Type alias to outcome of submission to `ViewStore`.
+pub(super) type ViewStoreSubmitOutcome<ChainApi> =
+	BaseSubmitOutcome<ChainApi, TxStatusStream<ChainApi>>;
+
+impl<ChainApi: graph::ChainApi> From<ValidatedPoolSubmitOutcome<ChainApi>>
+	for ViewStoreSubmitOutcome<ChainApi>
+{
+	fn from(value: ValidatedPoolSubmitOutcome<ChainApi>) -> Self {
+		Self::new(value.hash(), value.priority())
+	}
 }
 
 impl<ChainApi, Block> ViewStore<ChainApi, Block>
@@ -124,7 +204,7 @@ where
 			listener,
 			most_recent_view: RwLock::from(None),
 			dropped_stream_controller,
-			pending_txs_replacements: Default::default(),
+			pending_txs_tasks: Default::default(),
 		}
 	}
 
@@ -132,7 +212,7 @@ where
 	pub(super) async fn submit(
 		&self,
 		xts: impl IntoIterator<Item = (TimedTransactionSource, ExtrinsicFor<ChainApi>)> + Clone,
-	) -> HashMap<Block::Hash, Vec<Result<ExtrinsicHash<ChainApi>, ChainApi::Error>>> {
+	) -> HashMap<Block::Hash, Vec<Result<ViewStoreSubmitOutcome<ChainApi>, ChainApi::Error>>> {
 		let submit_futures = {
 			let active_views = self.active_views.read();
 			active_views
@@ -140,7 +220,16 @@ where
 				.map(|(_, view)| {
 					let view = view.clone();
 					let xts = xts.clone();
-					async move { (view.at.hash, view.submit_many(xts).await) }
+					async move {
+						(
+							view.at.hash,
+							view.submit_many(xts)
+								.await
+								.into_iter()
+								.map(|r| r.map(Into::into))
+								.collect::<Vec<_>>(),
+						)
+					}
 				})
 				.collect::<Vec<_>>()
 		};
@@ -153,7 +242,7 @@ where
 	pub(super) fn submit_local(
 		&self,
 		xt: ExtrinsicFor<ChainApi>,
-	) -> Result<ExtrinsicHash<ChainApi>, ChainApi::Error> {
+	) -> Result<ViewStoreSubmitOutcome<ChainApi>, ChainApi::Error> {
 		let active_views = self
 			.active_views
 			.read()
@@ -168,12 +257,14 @@ where
 			.map(|view| view.submit_local(xt.clone()))
 			.find_or_first(Result::is_ok);
 
-		if let Some(Err(err)) = result {
-			log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err);
-			return Err(err)
-		};
-
-		Ok(tx_hash)
+		match result {
+			Some(Err(err)) => {
+				log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err);
+				Err(err)
+			},
+			None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None)),
+			Some(Ok(r)) => Ok(r.into()),
+		}
 	}
 
 	/// Import a single extrinsic and starts to watch its progress in the pool.
@@ -188,7 +279,7 @@ where
 		_at: Block::Hash,
 		source: TimedTransactionSource,
 		xt: ExtrinsicFor<ChainApi>,
-	) -> Result<TxStatusStream<ChainApi>, ChainApi::Error> {
+	) -> Result<ViewStoreSubmitOutcome<ChainApi>, ChainApi::Error> {
 		let tx_hash = self.api.hash_and_length(&xt).0;
 		let Some(external_watcher) = self.listener.create_external_watcher_for_tx(tx_hash) else {
 			return Err(PoolError::AlreadyImported(Box::new(tx_hash)).into())
@@ -203,13 +294,13 @@ where
 					let source = source.clone();
 					async move {
 						match view.submit_and_watch(source, xt).await {
-							Ok(watcher) => {
+							Ok(mut result) => {
 								self.listener.add_view_watcher_for_tx(
 									tx_hash,
 									view.at.hash,
-									watcher.into_stream().boxed(),
+									result.expect_watcher().into_stream().boxed(),
 								);
-								Ok(())
+								Ok(result)
 							},
 							Err(e) => Err(e),
 						}
@@ -217,17 +308,20 @@ where
 				})
 				.collect::<Vec<_>>()
 		};
-		let maybe_error = futures::future::join_all(submit_and_watch_futures)
+		let result = futures::future::join_all(submit_and_watch_futures)
 			.await
 			.into_iter()
 			.find_or_first(Result::is_ok);
 
-		if let Some(Err(err)) = maybe_error {
-			log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err);
-			return Err(err);
-		};
-
-		Ok(external_watcher)
+		match result {
+			Some(Err(err)) => {
+				log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err);
+				return Err(err);
+			},
+			Some(Ok(result)) =>
+				Ok(ViewStoreSubmitOutcome::from(result).with_watcher(external_watcher)),
+			None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None).with_watcher(external_watcher)),
+		}
 	}
 
 	/// Returns the pool status for every active view.
@@ -575,8 +669,12 @@ where
 		replaced: ExtrinsicHash<ChainApi>,
 		watched: bool,
 	) {
-		if let Entry::Vacant(entry) = self.pending_txs_replacements.write().entry(replaced) {
-			entry.insert(PendingTxReplacement::new(xt.clone(), source.clone(), watched));
+		if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(replaced) {
+			entry.insert(PendingPreInsertTask::new_submission_action(
+				xt.clone(),
+				source.clone(),
+				watched,
+			));
 		} else {
 			return
 		};
@@ -586,8 +684,8 @@ where
 
 		self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await;
 
-		if let Some(replacement) = self.pending_txs_replacements.write().get_mut(&replaced) {
-			replacement.processed = true;
+		if let Some(replacement) = self.pending_txs_tasks.write().get_mut(&replaced) {
+			replacement.mark_processed();
 		}
 	}
 
@@ -596,18 +694,25 @@ where
 	/// After application, all already processed replacements are removed.
 	async fn apply_pending_tx_replacements(&self, view: Arc<View<ChainApi>>) {
 		let mut futures = vec![];
-		for replacement in self.pending_txs_replacements.read().values() {
-			let xt_hash = self.api.hash_and_length(&replacement.xt).0;
-			futures.push(self.replace_transaction_in_view(
-				view.clone(),
-				replacement.source.clone(),
-				replacement.xt.clone(),
-				xt_hash,
-				replacement.watched,
-			));
+		for replacement in self.pending_txs_tasks.read().values() {
+			match replacement.action {
+				PreInsertAction::SubmitTx(ref submission) => {
+					let xt_hash = self.api.hash_and_length(&submission.xt).0;
+					futures.push(self.replace_transaction_in_view(
+						view.clone(),
+						submission.source.clone(),
+						submission.xt.clone(),
+						xt_hash,
+						submission.watched,
+					));
+				},
+				PreInsertAction::RemoveSubtree(ref removal) => {
+					view.remove_subtree(removal.xt_hash, &*removal.listener_action);
+				},
+			}
 		}
 		let _results = futures::future::join_all(futures).await;
-		self.pending_txs_replacements.write().retain(|_, r| r.processed);
+		self.pending_txs_tasks.write().retain(|_, r| r.processed);
 	}
 
 	/// Submits `xt` to the given view.
@@ -623,11 +728,11 @@ where
 	) {
 		if watched {
 			match view.submit_and_watch(source, xt).await {
-				Ok(watcher) => {
+				Ok(mut result) => {
 					self.listener.add_view_watcher_for_tx(
 						xt_hash,
 						view.at.hash,
-						watcher.into_stream().boxed(),
+						result.expect_watcher().into_stream().boxed(),
 					);
 				},
 				Err(e) => {
@@ -690,4 +795,58 @@ where
 		};
 		let _results = futures::future::join_all(submit_futures).await;
 	}
+
+	/// Removes a transaction subtree from every view in the view_store, starting from the given
+	/// transaction hash.
+	///
+	/// This function traverses the dependency graph of transactions and removes the specified
+	/// transaction along with all its descendant transactions from every view.
+	///
+	/// A `listener_action` callback function is invoked for every transaction that is removed,
+	/// providing a reference to the pool's listener and the hash of the removed transaction. This
+	/// allows to trigger the required events. Note that listener may be called multiple times for
+	/// the same hash.
+	///
+	/// Function will also schedule view pre-insertion actions to ensure that transactions will be
+	/// removed from newly created view.
+	///
+	/// Returns a vector containing the hashes of all removed transactions, including the root
+	/// transaction specified by `tx_hash`. Vector contains only unique hashes.
+	pub(super) fn remove_transaction_subtree<F>(
+		&self,
+		xt_hash: ExtrinsicHash<ChainApi>,
+		listener_action: F,
+	) -> Vec<ExtrinsicHash<ChainApi>>
+	where
+		F: Fn(&mut crate::graph::Listener<ChainApi>, ExtrinsicHash<ChainApi>)
+			+ Clone
+			+ Send
+			+ Sync
+			+ 'static,
+	{
+		if let Entry::Vacant(entry) = self.pending_txs_tasks.write().entry(xt_hash) {
+			entry.insert(PendingPreInsertTask::new_removal_action(
+				xt_hash,
+				Arc::from(listener_action.clone()),
+			));
+		};
+
+		let mut seen = HashSet::new();
+
+		let removed = self
+			.active_views
+			.read()
+			.iter()
+			.chain(self.inactive_views.read().iter())
+			.filter(|(_, view)| view.is_imported(&xt_hash))
+			.flat_map(|(_, view)| view.remove_subtree(xt_hash, &listener_action))
+			.filter(|xt_hash| seen.insert(*xt_hash))
+			.collect();
+
+		if let Some(removal_action) = self.pending_txs_tasks.write().get_mut(&xt_hash) {
+			removal_action.mark_processed();
+		}
+
+		removed
+	}
 }
diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs
index 04eaa998f42..3b4afc88b78 100644
--- a/substrate/client/transaction-pool/src/graph/base_pool.rs
+++ b/substrate/client/transaction-pool/src/graph/base_pool.rs
@@ -453,27 +453,29 @@ impl<Hash: hash::Hash + Member + Serialize, Ex: std::fmt::Debug> BasePool<Hash,
 
 		while ready.is_exceeded(self.ready.len(), self.ready.bytes()) {
 			// find the worst transaction
-			let worst = self.ready.fold::<TransactionRef<Hash, Ex>, _>(|worst, current| {
-				let transaction = &current.transaction;
-				worst
-					.map(|worst| {
-						// Here we don't use `TransactionRef`'s ordering implementation because
-						// while it prefers priority like need here, it also prefers older
-						// transactions for inclusion purposes and limit enforcement needs to prefer
-						// newer transactions instead and drop the older ones.
-						match worst.transaction.priority.cmp(&transaction.transaction.priority) {
-							Ordering::Less => worst,
-							Ordering::Equal =>
-								if worst.insertion_id > transaction.insertion_id {
-									transaction.clone()
-								} else {
-									worst
-								},
-							Ordering::Greater => transaction.clone(),
-						}
-					})
-					.or_else(|| Some(transaction.clone()))
-			});
+			let worst =
+				self.ready.fold::<Option<TransactionRef<Hash, Ex>>, _>(None, |worst, current| {
+					let transaction = &current.transaction;
+					worst
+						.map(|worst| {
+							// Here we don't use `TransactionRef`'s ordering implementation because
+							// while it prefers priority like need here, it also prefers older
+							// transactions for inclusion purposes and limit enforcement needs to
+							// prefer newer transactions instead and drop the older ones.
+							match worst.transaction.priority.cmp(&transaction.transaction.priority)
+							{
+								Ordering::Less => worst,
+								Ordering::Equal =>
+									if worst.insertion_id > transaction.insertion_id {
+										transaction.clone()
+									} else {
+										worst
+									},
+								Ordering::Greater => transaction.clone(),
+							}
+						})
+						.or_else(|| Some(transaction.clone()))
+				});
 
 			if let Some(worst) = worst {
 				removed.append(&mut self.remove_subtree(&[worst.transaction.hash.clone()]))
diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs
index 41daf5491f7..7b09ee4c640 100644
--- a/substrate/client/transaction-pool/src/graph/listener.rs
+++ b/substrate/client/transaction-pool/src/graph/listener.rs
@@ -126,8 +126,8 @@ impl<H: hash::Hash + traits::Member + Serialize + Clone, C: ChainApi> Listener<H
 	}
 
 	/// Transaction was dropped from the pool because of enforcing the limit.
-	pub fn limit_enforced(&mut self, tx: &H) {
-		trace!(target: LOG_TARGET, "[{:?}] Dropped (limit enforced)", tx);
+	pub fn limits_enforced(&mut self, tx: &H) {
+		trace!(target: LOG_TARGET, "[{:?}] Dropped (limits enforced)", tx);
 		self.fire(tx, |watcher| watcher.limit_enforced());
 
 		if let Some(ref sink) = self.dropped_by_limits_sink {
diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs
index d93898b1b22..2114577f4de 100644
--- a/substrate/client/transaction-pool/src/graph/mod.rs
+++ b/substrate/client/transaction-pool/src/graph/mod.rs
@@ -41,6 +41,12 @@ pub use self::pool::{
 	BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, RawExtrinsicFor,
 	TransactionFor, ValidatedTransactionFor,
 };
-pub use validated_pool::{IsValidator, ValidatedTransaction};
+pub use validated_pool::{
+	BaseSubmitOutcome, IsValidator, Listener, ValidatedPoolSubmitOutcome, ValidatedTransaction,
+};
 
+pub(crate) use self::pool::CheckBannedBeforeVerify;
 pub(crate) use listener::DroppedByLimitsEvent;
+
+#[cfg(doc)]
+pub(crate) use validated_pool::ValidatedPool;
diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs
index 4c0ace0b1c7..403712662ad 100644
--- a/substrate/client/transaction-pool/src/graph/pool.rs
+++ b/substrate/client/transaction-pool/src/graph/pool.rs
@@ -37,7 +37,7 @@ use std::{
 use super::{
 	base_pool as base,
 	validated_pool::{IsValidator, ValidatedPool, ValidatedTransaction},
-	watcher::Watcher,
+	ValidatedPoolSubmitOutcome,
 };
 
 /// Modification notification event stream type;
@@ -168,7 +168,7 @@ impl Options {
 /// Should we check that the transaction is banned
 /// in the pool, before we verify it?
 #[derive(Copy, Clone)]
-enum CheckBannedBeforeVerify {
+pub(crate) enum CheckBannedBeforeVerify {
 	Yes,
 	No,
 }
@@ -204,7 +204,7 @@ impl<B: ChainApi> Pool<B> {
 		&self,
 		at: &HashAndNumber<B::Block>,
 		xts: impl IntoIterator<Item = (base::TimedTransactionSource, ExtrinsicFor<B>)>,
-	) -> Vec<Result<ExtrinsicHash<B>, B::Error>> {
+	) -> Vec<Result<ValidatedPoolSubmitOutcome<B>, B::Error>> {
 		let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await;
 		self.validated_pool.submit(validated_transactions.into_values())
 	}
@@ -216,7 +216,7 @@ impl<B: ChainApi> Pool<B> {
 		&self,
 		at: &HashAndNumber<B::Block>,
 		xts: impl IntoIterator<Item = (base::TimedTransactionSource, ExtrinsicFor<B>)>,
-	) -> Vec<Result<ExtrinsicHash<B>, B::Error>> {
+	) -> Vec<Result<ValidatedPoolSubmitOutcome<B>, B::Error>> {
 		let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await;
 		self.validated_pool.submit(validated_transactions.into_values())
 	}
@@ -227,7 +227,7 @@ impl<B: ChainApi> Pool<B> {
 		at: &HashAndNumber<B::Block>,
 		source: base::TimedTransactionSource,
 		xt: ExtrinsicFor<B>,
-	) -> Result<ExtrinsicHash<B>, B::Error> {
+	) -> Result<ValidatedPoolSubmitOutcome<B>, B::Error> {
 		let res = self.submit_at(at, std::iter::once((source, xt))).await.pop();
 		res.expect("One extrinsic passed; one result returned; qed")
 	}
@@ -238,7 +238,7 @@ impl<B: ChainApi> Pool<B> {
 		at: &HashAndNumber<B::Block>,
 		source: base::TimedTransactionSource,
 		xt: ExtrinsicFor<B>,
-	) -> Result<Watcher<ExtrinsicHash<B>, ExtrinsicHash<B>>, B::Error> {
+	) -> Result<ValidatedPoolSubmitOutcome<B>, B::Error> {
 		let (_, tx) = self
 			.verify_one(at.hash, at.number, source, xt, CheckBannedBeforeVerify::Yes)
 			.await;
@@ -432,7 +432,7 @@ impl<B: ChainApi> Pool<B> {
 	}
 
 	/// Returns future that validates single transaction at given block.
-	async fn verify_one(
+	pub(crate) async fn verify_one(
 		&self,
 		block_hash: <B::Block as BlockT>::Hash,
 		block_number: NumberFor<B>,
@@ -539,6 +539,7 @@ mod tests {
 				.into(),
 			),
 		)
+		.map(|outcome| outcome.hash())
 		.unwrap();
 
 		// then
@@ -567,7 +568,10 @@ mod tests {
 
 		// when
 		let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::<Vec<_>>();
-		let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs));
+		let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs))
+			.into_iter()
+			.map(|r| r.map(|o| o.hash()))
+			.collect::<Vec<_>>();
 		log::debug!("--> {hashes:#?}");
 
 		// then
@@ -591,7 +595,8 @@ mod tests {
 
 		// when
 		pool.validated_pool.ban(&Instant::now(), vec![pool.hash_of(&uxt)]);
-		let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into()));
+		let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into()))
+			.map(|o| o.hash());
 		assert_eq!(pool.validated_pool().status().ready, 0);
 		assert_eq!(pool.validated_pool().status().future, 0);
 
@@ -614,7 +619,8 @@ mod tests {
 		let uxt = ExtrinsicBuilder::new_include_data(vec![42]).build();
 
 		// when
-		let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into()));
+		let res = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.into()))
+			.map(|o| o.hash());
 
 		// then
 		assert_matches!(res.unwrap_err(), error::Error::Unactionable);
@@ -642,7 +648,8 @@ mod tests {
 					.into(),
 				),
 			)
-			.unwrap();
+			.unwrap()
+			.hash();
 			let hash1 = block_on(
 				pool.submit_one(
 					&han_of_block0,
@@ -656,7 +663,8 @@ mod tests {
 					.into(),
 				),
 			)
-			.unwrap();
+			.unwrap()
+			.hash();
 			// future doesn't count
 			let _hash = block_on(
 				pool.submit_one(
@@ -671,7 +679,8 @@ mod tests {
 					.into(),
 				),
 			)
-			.unwrap();
+			.unwrap()
+			.hash();
 
 			assert_eq!(pool.validated_pool().status().ready, 2);
 			assert_eq!(pool.validated_pool().status().future, 1);
@@ -704,7 +713,8 @@ mod tests {
 				.into(),
 			),
 		)
-		.unwrap();
+		.unwrap()
+		.hash();
 		let hash2 = block_on(
 			pool.submit_one(
 				&han_of_block0,
@@ -718,7 +728,8 @@ mod tests {
 				.into(),
 			),
 		)
-		.unwrap();
+		.unwrap()
+		.hash();
 		let hash3 = block_on(
 			pool.submit_one(
 				&han_of_block0,
@@ -732,7 +743,8 @@ mod tests {
 				.into(),
 			),
 		)
-		.unwrap();
+		.unwrap()
+		.hash();
 
 		// when
 		pool.validated_pool.clear_stale(&api.expect_hash_and_number(5));
@@ -764,7 +776,8 @@ mod tests {
 				.into(),
 			),
 		)
-		.unwrap();
+		.unwrap()
+		.hash();
 
 		// when
 		block_on(pool.prune_tags(&api.expect_hash_and_number(1), vec![vec![0]], vec![hash1]));
@@ -792,8 +805,9 @@ mod tests {
 		let api = Arc::new(TestApi::default());
 		let pool = Pool::new_with_staticly_sized_rotator(options, true.into(), api.clone());
 
-		let hash1 =
-			block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into())).unwrap();
+		let hash1 = block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt.into()))
+			.unwrap()
+			.hash();
 		assert_eq!(pool.validated_pool().status().future, 1);
 
 		// when
@@ -810,7 +824,8 @@ mod tests {
 				.into(),
 			),
 		)
-		.unwrap();
+		.unwrap()
+		.hash();
 
 		// then
 		assert_eq!(pool.validated_pool().status().future, 1);
@@ -842,6 +857,7 @@ mod tests {
 				.into(),
 			),
 		)
+		.map(|o| o.hash())
 		.unwrap_err();
 
 		// then
@@ -868,6 +884,7 @@ mod tests {
 				.into(),
 			),
 		)
+		.map(|o| o.hash())
 		.unwrap_err();
 
 		// then
@@ -896,7 +913,8 @@ mod tests {
 					.into(),
 				),
 			)
-			.unwrap();
+			.unwrap()
+			.expect_watcher();
 			assert_eq!(pool.validated_pool().status().ready, 1);
 			assert_eq!(pool.validated_pool().status().future, 0);
 
@@ -933,7 +951,8 @@ mod tests {
 					.into(),
 				),
 			)
-			.unwrap();
+			.unwrap()
+			.expect_watcher();
 			assert_eq!(pool.validated_pool().status().ready, 1);
 			assert_eq!(pool.validated_pool().status().future, 0);
 
@@ -972,7 +991,8 @@ mod tests {
 					.into(),
 				),
 			)
-			.unwrap();
+			.unwrap()
+			.expect_watcher();
 			assert_eq!(pool.validated_pool().status().ready, 0);
 			assert_eq!(pool.validated_pool().status().future, 1);
 
@@ -1011,7 +1031,8 @@ mod tests {
 			});
 			let watcher =
 				block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into()))
-					.unwrap();
+					.unwrap()
+					.expect_watcher();
 			assert_eq!(pool.validated_pool().status().ready, 1);
 
 			// when
@@ -1036,7 +1057,8 @@ mod tests {
 			});
 			let watcher =
 				block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, uxt.into()))
-					.unwrap();
+					.unwrap()
+					.expect_watcher();
 			assert_eq!(pool.validated_pool().status().ready, 1);
 
 			// when
@@ -1069,7 +1091,8 @@ mod tests {
 			});
 			let watcher =
 				block_on(pool.submit_and_watch(&api.expect_hash_and_number(0), SOURCE, xt.into()))
-					.unwrap();
+					.unwrap()
+					.expect_watcher();
 			assert_eq!(pool.validated_pool().status().ready, 1);
 
 			// when
@@ -1136,7 +1159,9 @@ mod tests {
 				// after validation `IncludeData` will have priority set to 9001
 				// (validate_transaction mock)
 				let xt = ExtrinsicBuilder::new_include_data(Vec::new()).build();
-				block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap();
+				block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into()))
+					.unwrap()
+					.expect_watcher();
 				assert_eq!(pool.validated_pool().status().ready, 1);
 
 				// after validation `Transfer` will have priority set to 4 (validate_transaction
@@ -1147,8 +1172,9 @@ mod tests {
 					amount: 5,
 					nonce: 0,
 				});
-				let watcher =
-					block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into())).unwrap();
+				let watcher = block_on(pool.submit_and_watch(&han_of_block0, SOURCE, xt.into()))
+					.unwrap()
+					.expect_watcher();
 				assert_eq!(pool.validated_pool().status().ready, 2);
 
 				// when
diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs
index 9061d0e2558..b8aef99e638 100644
--- a/substrate/client/transaction-pool/src/graph/ready.rs
+++ b/substrate/client/transaction-pool/src/graph/ready.rs
@@ -232,12 +232,10 @@ impl<Hash: hash::Hash + Member + Serialize, Ex> ReadyTransactions<Hash, Ex> {
 		Ok(replaced)
 	}
 
-	/// Fold a list of ready transactions to compute a single value.
-	pub fn fold<R, F: FnMut(Option<R>, &ReadyTx<Hash, Ex>) -> Option<R>>(
-		&mut self,
-		f: F,
-	) -> Option<R> {
-		self.ready.read().values().fold(None, f)
+	/// Fold a list of ready transactions to compute a single value using initial value of
+	/// accumulator.
+	pub fn fold<R, F: FnMut(R, &ReadyTx<Hash, Ex>) -> R>(&self, init: R, f: F) -> R {
+		self.ready.read().values().fold(init, f)
 	}
 
 	/// Returns true if given transaction is part of the queue.
diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs
index 6c3bbbf34b5..fe15c6eca30 100644
--- a/substrate/client/transaction-pool/src/graph/tracked_map.rs
+++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs
@@ -173,6 +173,11 @@ where
 	pub fn len(&mut self) -> usize {
 		self.inner_guard.len()
 	}
+
+	/// Returns an iterator over all key-value pairs.
+	pub fn iter(&self) -> Iter<'_, K, V> {
+		self.inner_guard.iter()
+	}
 }
 
 #[cfg(test)]
diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs
index 3f7bf4773de..bc2b07896db 100644
--- a/substrate/client/transaction-pool/src/graph/validated_pool.rs
+++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs
@@ -18,25 +18,22 @@
 
 use std::{
 	collections::{HashMap, HashSet},
-	hash,
 	sync::Arc,
 };
 
 use crate::{common::log_xt::log_xt_trace, LOG_TARGET};
 use futures::channel::mpsc::{channel, Sender};
 use parking_lot::{Mutex, RwLock};
-use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions};
-use serde::Serialize;
+use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions, TransactionPriority};
 use sp_blockchain::HashAndNumber;
 use sp_runtime::{
-	traits::{self, SaturatedConversion},
+	traits::SaturatedConversion,
 	transaction_validity::{TransactionTag as Tag, ValidTransaction},
 };
 use std::time::Instant;
 
 use super::{
 	base_pool::{self as base, PruneStatus},
-	listener::Listener,
 	pool::{
 		BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, Options, TransactionFor,
 	},
@@ -79,12 +76,23 @@ impl<Hash, Ex, Error> ValidatedTransaction<Hash, Ex, Error> {
 			valid_till: at.saturated_into::<u64>().saturating_add(validity.longevity),
 		})
 	}
+
+	/// Returns priority for valid transaction, None if transaction is not valid.
+	pub fn priority(&self) -> Option<TransactionPriority> {
+		match self {
+			ValidatedTransaction::Valid(base::Transaction { priority, .. }) => Some(*priority),
+			_ => None,
+		}
+	}
 }
 
-/// A type of validated transaction stored in the pool.
+/// A type of validated transaction stored in the validated pool.
 pub type ValidatedTransactionFor<B> =
 	ValidatedTransaction<ExtrinsicHash<B>, ExtrinsicFor<B>, <B as ChainApi>::Error>;
 
+/// A type alias representing ValidatedPool listener for given ChainApi type.
+pub type Listener<B> = super::listener::Listener<ExtrinsicHash<B>, B>;
+
 /// A closure that returns true if the local node is a validator that can author blocks.
 #[derive(Clone)]
 pub struct IsValidator(Arc<Box<dyn Fn() -> bool + Send + Sync>>);
@@ -101,12 +109,56 @@ impl From<Box<dyn Fn() -> bool + Send + Sync>> for IsValidator {
 	}
 }
 
+/// Represents the result of `submit` or `submit_and_watch` operations.
+pub struct BaseSubmitOutcome<B: ChainApi, W> {
+	/// The hash of the submitted transaction.
+	hash: ExtrinsicHash<B>,
+	/// A transaction watcher. This is `Some` for `submit_and_watch` and `None` for `submit`.
+	watcher: Option<W>,
+
+	/// The priority of the transaction. Defaults to None if unknown.
+	priority: Option<TransactionPriority>,
+}
+
+/// Type alias to outcome of submission to `ValidatedPool`.
+pub type ValidatedPoolSubmitOutcome<B> =
+	BaseSubmitOutcome<B, Watcher<ExtrinsicHash<B>, ExtrinsicHash<B>>>;
+
+impl<B: ChainApi, W> BaseSubmitOutcome<B, W> {
+	/// Creates a new instance with given hash and priority.
+	pub fn new(hash: ExtrinsicHash<B>, priority: Option<TransactionPriority>) -> Self {
+		Self { hash, priority, watcher: None }
+	}
+
+	/// Sets the transaction watcher.
+	pub fn with_watcher(mut self, watcher: W) -> Self {
+		self.watcher = Some(watcher);
+		self
+	}
+
+	/// Provides priority of submitted transaction.
+	pub fn priority(&self) -> Option<TransactionPriority> {
+		self.priority
+	}
+
+	/// Provides hash of submitted transaction.
+	pub fn hash(&self) -> ExtrinsicHash<B> {
+		self.hash
+	}
+
+	/// Provides a watcher. Should only be called on outcomes of `submit_and_watch`. Otherwise will
+	/// panic (that would mean logical error in program).
+	pub fn expect_watcher(&mut self) -> W {
+		self.watcher.take().expect("watcher was set in submit_and_watch. qed")
+	}
+}
+
 /// Pool that deals with validated transactions.
 pub struct ValidatedPool<B: ChainApi> {
 	api: Arc<B>,
 	is_validator: IsValidator,
 	options: Options,
-	listener: RwLock<Listener<ExtrinsicHash<B>, B>>,
+	listener: RwLock<Listener<B>>,
 	pub(crate) pool: RwLock<base::BasePool<ExtrinsicHash<B>, ExtrinsicFor<B>>>,
 	import_notification_sinks: Mutex<Vec<Sender<ExtrinsicHash<B>>>>,
 	rotator: PoolRotator<ExtrinsicHash<B>>,
@@ -200,7 +252,7 @@ impl<B: ChainApi> ValidatedPool<B> {
 	pub fn submit(
 		&self,
 		txs: impl IntoIterator<Item = ValidatedTransactionFor<B>>,
-	) -> Vec<Result<ExtrinsicHash<B>, B::Error>> {
+	) -> Vec<Result<ValidatedPoolSubmitOutcome<B>, B::Error>> {
 		let results = txs
 			.into_iter()
 			.map(|validated_tx| self.submit_one(validated_tx))
@@ -216,7 +268,7 @@ impl<B: ChainApi> ValidatedPool<B> {
 		results
 			.into_iter()
 			.map(|res| match res {
-				Ok(ref hash) if removed.contains(hash) =>
+				Ok(outcome) if removed.contains(&outcome.hash) =>
 					Err(error::Error::ImmediatelyDropped.into()),
 				other => other,
 			})
@@ -224,9 +276,13 @@ impl<B: ChainApi> ValidatedPool<B> {
 	}
 
 	/// Submit single pre-validated transaction to the pool.
-	fn submit_one(&self, tx: ValidatedTransactionFor<B>) -> Result<ExtrinsicHash<B>, B::Error> {
+	fn submit_one(
+		&self,
+		tx: ValidatedTransactionFor<B>,
+	) -> Result<ValidatedPoolSubmitOutcome<B>, B::Error> {
 		match tx {
 			ValidatedTransaction::Valid(tx) => {
+				let priority = tx.priority;
 				log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one", tx.hash);
 				if !tx.propagate && !(self.is_validator.0)() {
 					return Err(error::Error::Unactionable.into())
@@ -254,7 +310,7 @@ impl<B: ChainApi> ValidatedPool<B> {
 
 				let mut listener = self.listener.write();
 				fire_events(&mut *listener, &imported);
-				Ok(*imported.hash())
+				Ok(ValidatedPoolSubmitOutcome::new(*imported.hash(), Some(priority)))
 			},
 			ValidatedTransaction::Invalid(hash, err) => {
 				log::trace!(target: LOG_TARGET, "[{:?}] ValidatedPool::submit_one invalid: {:?}", hash, err);
@@ -305,7 +361,7 @@ impl<B: ChainApi> ValidatedPool<B> {
 			// run notifications
 			let mut listener = self.listener.write();
 			for h in &removed {
-				listener.limit_enforced(h);
+				listener.limits_enforced(h);
 			}
 
 			removed
@@ -318,7 +374,7 @@ impl<B: ChainApi> ValidatedPool<B> {
 	pub fn submit_and_watch(
 		&self,
 		tx: ValidatedTransactionFor<B>,
-	) -> Result<Watcher<ExtrinsicHash<B>, ExtrinsicHash<B>>, B::Error> {
+	) -> Result<ValidatedPoolSubmitOutcome<B>, B::Error> {
 		match tx {
 			ValidatedTransaction::Valid(tx) => {
 				let hash = self.api.hash_and_length(&tx.data).0;
@@ -326,7 +382,7 @@ impl<B: ChainApi> ValidatedPool<B> {
 				self.submit(std::iter::once(ValidatedTransaction::Valid(tx)))
 					.pop()
 					.expect("One extrinsic passed; one result returned; qed")
-					.map(|_| watcher)
+					.map(|outcome| outcome.with_watcher(watcher))
 			},
 			ValidatedTransaction::Invalid(hash, err) => {
 				self.rotator.ban(&Instant::now(), std::iter::once(hash));
@@ -711,11 +767,42 @@ impl<B: ChainApi> ValidatedPool<B> {
 			listener.future(&f.hash);
 		});
 	}
+
+	/// Removes a transaction subtree from the pool, starting from the given transaction hash.
+	///
+	/// This function traverses the dependency graph of transactions and removes the specified
+	/// transaction along with all its descendant transactions from the pool.
+	///
+	/// A `listener_action` callback function is invoked for every transaction that is removed,
+	/// providing a reference to the pool's listener and the hash of the removed transaction. This
+	/// allows to trigger the required events.
+	///
+	/// Returns a vector containing the hashes of all removed transactions, including the root
+	/// transaction specified by `tx_hash`.
+	pub fn remove_subtree<F>(
+		&self,
+		tx_hash: ExtrinsicHash<B>,
+		listener_action: F,
+	) -> Vec<ExtrinsicHash<B>>
+	where
+		F: Fn(&mut Listener<B>, ExtrinsicHash<B>),
+	{
+		self.pool
+			.write()
+			.remove_subtree(&[tx_hash])
+			.into_iter()
+			.map(|tx| {
+				let removed_tx_hash = tx.hash;
+				let mut listener = self.listener.write();
+				listener_action(&mut *listener, removed_tx_hash);
+				removed_tx_hash
+			})
+			.collect::<Vec<_>>()
+	}
 }
 
-fn fire_events<H, B, Ex>(listener: &mut Listener<H, B>, imported: &base::Imported<H, Ex>)
+fn fire_events<B, Ex>(listener: &mut Listener<B>, imported: &base::Imported<ExtrinsicHash<B>, Ex>)
 where
-	H: hash::Hash + Eq + traits::Member + Serialize,
 	B: ChainApi,
 {
 	match *imported {
diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
index caa09585b28..2a691ae35ea 100644
--- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
+++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs
@@ -405,7 +405,8 @@ mod tests {
 			TimedTransactionSource::new_external(false),
 			uxt.clone().into(),
 		))
-		.expect("Should be valid");
+		.expect("Should be valid")
+		.hash();
 
 		block_on(queue.revalidate_later(han_of_block0.hash, vec![uxt_hash]));
 
@@ -448,7 +449,7 @@ mod tests {
 				vec![(source.clone(), uxt0.into()), (source, uxt1.into())],
 			))
 			.into_iter()
-			.map(|r| r.expect("Should be valid"))
+			.map(|r| r.expect("Should be valid").hash())
 			.collect::<Vec<_>>();
 
 		assert_eq!(api.validation_requests().len(), 2);
diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
index 2b32704945c..3598f9dbc2a 100644
--- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
+++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs
@@ -274,7 +274,12 @@ where
 
 		let number = self.api.resolve_block_number(at);
 		let at = HashAndNumber { hash: at, number: number? };
-		Ok(pool.submit_at(&at, xts).await)
+		Ok(pool
+			.submit_at(&at, xts)
+			.await
+			.into_iter()
+			.map(|result| result.map(|outcome| outcome.hash()))
+			.collect())
 	}
 
 	async fn submit_one(
@@ -292,6 +297,7 @@ where
 		let at = HashAndNumber { hash: at, number: number? };
 		pool.submit_one(&at, TimedTransactionSource::from_transaction_source(source, false), xt)
 			.await
+			.map(|outcome| outcome.hash())
 	}
 
 	async fn submit_and_watch(
@@ -308,15 +314,13 @@ where
 		let number = self.api.resolve_block_number(at);
 
 		let at = HashAndNumber { hash: at, number: number? };
-		let watcher = pool
-			.submit_and_watch(
-				&at,
-				TimedTransactionSource::from_transaction_source(source, false),
-				xt,
-			)
-			.await?;
-
-		Ok(watcher.into_stream().boxed())
+		pool.submit_and_watch(
+			&at,
+			TimedTransactionSource::from_transaction_source(source, false),
+			xt,
+		)
+		.await
+		.map(|mut outcome| outcome.expect_watcher().into_stream().boxed())
 	}
 
 	fn remove_invalid(&self, hashes: &[TxHash<Self>]) -> Vec<Arc<Self::InPoolTransaction>> {
@@ -484,7 +488,11 @@ where
 			validity,
 		);
 
-		self.pool.validated_pool().submit(vec![validated]).remove(0)
+		self.pool
+			.validated_pool()
+			.submit(vec![validated])
+			.remove(0)
+			.map(|outcome| outcome.hash())
 	}
 }
 
diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs
index aaffebc0db0..530c25caf88 100644
--- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs
+++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs
@@ -192,12 +192,9 @@ macro_rules! assert_ready_iterator {
 		let output: Vec<_> = ready_iterator.collect();
 		log::debug!(target:LOG_TARGET, "expected: {:#?}", expected);
 		log::debug!(target:LOG_TARGET, "output: {:#?}", output);
+		let output = output.into_iter().map(|t|t.hash).collect::<Vec<_>>();
 		assert_eq!(expected.len(), output.len());
-		assert!(
-			output.iter().zip(expected.iter()).all(|(o,e)| {
-				o.hash == *e
-			})
-		);
+		assert_eq!(output,expected);
 	}};
 }
 
@@ -215,6 +212,18 @@ macro_rules! assert_future_iterator {
 	}};
 }
 
+#[macro_export]
+macro_rules! assert_watcher_stream {
+	($stream:ident, [$( $event:expr ),*]) => {{
+		let expected = vec![ $($event),*];
+		log::debug!(target:LOG_TARGET, "expected: {:#?} {}, block now:", expected, expected.len());
+		let output = futures::executor::block_on_stream($stream).take(expected.len()).collect::<Vec<_>>();
+		log::debug!(target:LOG_TARGET, "output: {:#?}", output);
+		assert_eq!(expected.len(), output.len());
+		assert_eq!(output, expected);
+	}};
+}
+
 pub const SOURCE: TransactionSource = TransactionSource::External;
 
 #[cfg(test)]
diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs
index 4ed9b450386..af5e7e8c5a6 100644
--- a/substrate/client/transaction-pool/tests/fatp_prios.rs
+++ b/substrate/client/transaction-pool/tests/fatp_prios.rs
@@ -20,13 +20,15 @@
 
 pub mod fatp_common;
 
-use fatp_common::{new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE};
+use fatp_common::{invalid_hash, new_best_block_event, TestPoolBuilder, LOG_TARGET, SOURCE};
 use futures::{executor::block_on, FutureExt};
 use sc_transaction_pool::ChainApi;
-use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionStatus};
+use sc_transaction_pool_api::{
+	error::Error as TxPoolError, LocalTransactionPool, MaintainedTransactionPool, TransactionPool,
+	TransactionStatus,
+};
 use substrate_test_runtime_client::Sr25519Keyring::*;
 use substrate_test_runtime_transaction_pool::uxt;
-
 #[test]
 fn fatp_prio_ready_higher_evicts_lower() {
 	sp_tracing::try_init_simple();
@@ -247,3 +249,312 @@ fn fatp_prio_watcher_future_lower_prio_gets_dropped_from_all_views() {
 	assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]);
 	assert_ready_iterator!(header02.hash(), pool, [xt2, xt1]);
 }
+
+#[test]
+fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+	api.set_nonce(api.genesis_hash(), Dave.into(), 500);
+	api.set_nonce(api.genesis_hash(), Eve.into(), 600);
+	api.set_nonce(api.genesis_hash(), Ferdie.into(), 700);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Bob, 300);
+	let xt2 = uxt(Charlie, 400);
+
+	let xt3 = uxt(Dave, 500);
+
+	let xt4 = uxt(Eve, 600);
+	let xt5 = uxt(Ferdie, 700);
+
+	api.set_priority(&xt0, 1);
+	api.set_priority(&xt1, 2);
+	api.set_priority(&xt2, 3);
+	api.set_priority(&xt3, 4);
+
+	api.set_priority(&xt4, 5);
+	api.set_priority(&xt5, 6);
+
+	let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().1, 2);
+
+	let header02 = api.push_block_with_parent(header01.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash())));
+
+	let _xt2_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
+	let _xt3_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();
+
+	assert_pool_status!(header02.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().1, 4);
+
+	let header03 = api.push_block_with_parent(header02.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash())));
+
+	let _xt4_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap();
+	let _xt5_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap();
+
+	assert_pool_status!(header03.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().1, 4);
+
+	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+
+	assert_ready_iterator!(header01.hash(), pool, []);
+	assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]);
+	assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]);
+}
+
+#[test]
+fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Alice, 201);
+	let xt2 = uxt(Alice, 202);
+	let xt3 = uxt(Bob, 300);
+	let xt4 = uxt(Charlie, 400);
+
+	api.set_priority(&xt0, 1);
+	api.set_priority(&xt1, 3);
+	api.set_priority(&xt2, 3);
+	api.set_priority(&xt3, 2);
+	api.set_priority(&xt4, 2);
+
+	let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+	let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
+	let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();
+
+	assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]);
+	assert_pool_status!(header01.hash(), &pool, 4, 0);
+	assert_eq!(pool.mempool_len().1, 4);
+
+	let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap();
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_ready_iterator!(header01.hash(), pool, [xt3, xt4]);
+
+	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]);
+	assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]);
+}
+
+#[test]
+fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree2() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(4).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Alice, 201);
+	let xt2 = uxt(Alice, 202);
+	let xt3 = uxt(Bob, 300);
+	let xt4 = uxt(Charlie, 400);
+
+	api.set_priority(&xt0, 1);
+	api.set_priority(&xt1, 3);
+	api.set_priority(&xt2, 3);
+	api.set_priority(&xt3, 2);
+	api.set_priority(&xt4, 2);
+
+	let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+	let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
+	let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();
+
+	assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]);
+	assert_pool_status!(header01.hash(), &pool, 4, 0);
+	assert_eq!(pool.mempool_len().1, 4);
+
+	let header02 = api.push_block_with_parent(header01.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash())));
+
+	let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap();
+	assert_ready_iterator!(header01.hash(), pool, [xt3]);
+	assert_pool_status!(header02.hash(), &pool, 2, 0);
+	assert_ready_iterator!(header02.hash(), pool, [xt3, xt4]);
+
+	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]);
+	assert_watcher_stream!(xt4_watcher, [TransactionStatus::Ready]);
+}
+
+#[test]
+fn fatp_prios_watcher_full_mempool_lower_prio_gets_rejected() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(2).with_ready_count(2).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+	api.set_nonce(api.genesis_hash(), Dave.into(), 500);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Bob, 300);
+	let xt2 = uxt(Charlie, 400);
+	let xt3 = uxt(Dave, 500);
+
+	api.set_priority(&xt0, 2);
+	api.set_priority(&xt1, 2);
+	api.set_priority(&xt2, 2);
+	api.set_priority(&xt3, 1);
+
+	let _xt0_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let _xt1_watcher =
+		block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().1, 2);
+
+	let header02 = api.push_block_with_parent(header01.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash())));
+
+	assert_pool_status!(header02.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().1, 2);
+
+	assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]);
+	assert_ready_iterator!(header02.hash(), pool, [xt0, xt1]);
+
+	let result2 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).map(|_| ());
+	assert!(matches!(result2.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped));
+	let result3 = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).map(|_| ());
+	assert!(matches!(result3.as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped));
+}
+
+#[test]
+fn fatp_prios_watcher_full_mempool_does_not_keep_dropped_transaction() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+	api.set_nonce(api.genesis_hash(), Dave.into(), 500);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Bob, 300);
+	let xt2 = uxt(Charlie, 400);
+	let xt3 = uxt(Dave, 500);
+
+	api.set_priority(&xt0, 2);
+	api.set_priority(&xt1, 2);
+	api.set_priority(&xt2, 2);
+	api.set_priority(&xt3, 2);
+
+	let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
+	let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
+	let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
+	let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();
+
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_ready_iterator!(header01.hash(), pool, [xt2, xt3]);
+
+	assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]);
+	assert_watcher_stream!(xt2_watcher, [TransactionStatus::Ready]);
+	assert_watcher_stream!(xt3_watcher, [TransactionStatus::Ready]);
+}
+
+#[test]
+fn fatp_prios_submit_local_full_mempool_higher_prio_is_accepted() {
+	sp_tracing::try_init_simple();
+
+	let builder = TestPoolBuilder::new();
+	let (pool, api, _) = builder.with_mempool_count_limit(4).with_ready_count(2).build();
+	api.set_nonce(api.genesis_hash(), Bob.into(), 300);
+	api.set_nonce(api.genesis_hash(), Charlie.into(), 400);
+	api.set_nonce(api.genesis_hash(), Dave.into(), 500);
+	api.set_nonce(api.genesis_hash(), Eve.into(), 600);
+	api.set_nonce(api.genesis_hash(), Ferdie.into(), 700);
+
+	let header01 = api.push_block(1, vec![], true);
+	let event = new_best_block_event(&pool, None, header01.hash());
+	block_on(pool.maintain(event));
+
+	let xt0 = uxt(Alice, 200);
+	let xt1 = uxt(Bob, 300);
+	let xt2 = uxt(Charlie, 400);
+
+	let xt3 = uxt(Dave, 500);
+
+	let xt4 = uxt(Eve, 600);
+	let xt5 = uxt(Ferdie, 700);
+
+	api.set_priority(&xt0, 1);
+	api.set_priority(&xt1, 2);
+	api.set_priority(&xt2, 3);
+	api.set_priority(&xt3, 4);
+
+	api.set_priority(&xt4, 5);
+	api.set_priority(&xt5, 6);
+	pool.submit_local(invalid_hash(), xt0.clone()).unwrap();
+	pool.submit_local(invalid_hash(), xt1.clone()).unwrap();
+
+	assert_pool_status!(header01.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().0, 2);
+
+	let header02 = api.push_block_with_parent(header01.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash())));
+
+	pool.submit_local(invalid_hash(), xt2.clone()).unwrap();
+	pool.submit_local(invalid_hash(), xt3.clone()).unwrap();
+
+	assert_pool_status!(header02.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().0, 4);
+
+	let header03 = api.push_block_with_parent(header02.hash(), vec![], true);
+	block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash())));
+
+	pool.submit_local(invalid_hash(), xt4.clone()).unwrap();
+	pool.submit_local(invalid_hash(), xt5.clone()).unwrap();
+
+	assert_pool_status!(header03.hash(), &pool, 2, 0);
+	assert_eq!(pool.mempool_len().0, 4);
+
+	assert_ready_iterator!(header01.hash(), pool, []);
+	assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]);
+	assert_ready_iterator!(header03.hash(), pool, [xt5, xt4]);
+}
diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs
index de35726435f..c70f4548331 100644
--- a/substrate/client/transaction-pool/tests/pool.rs
+++ b/substrate/client/transaction-pool/tests/pool.rs
@@ -158,6 +158,7 @@ fn prune_tags_should_work() {
 	let (pool, api) = pool();
 	let hash209 =
 		block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into()))
+			.map(|o| o.hash())
 			.unwrap();
 	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into()))
 		.unwrap();
@@ -184,10 +185,13 @@ fn prune_tags_should_work() {
 fn should_ban_invalid_transactions() {
 	let (pool, api) = pool();
 	let uxt = Arc::from(uxt(Alice, 209));
-	let hash =
-		block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap();
+	let hash = block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone()))
+		.unwrap()
+		.hash();
 	pool.validated_pool().remove_invalid(&[hash]);
-	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err();
+	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone()))
+		.map(|_| ())
+		.unwrap_err();
 
 	// when
 	let pending: Vec<_> = pool
@@ -198,7 +202,9 @@ fn should_ban_invalid_transactions() {
 	assert_eq!(pending, Vec::<Nonce>::new());
 
 	// then
-	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err();
+	block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone()))
+		.map(|_| ())
+		.unwrap_err();
 }
 
 #[test]
diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs
index 93e5855eefc..f88694fb107 100644
--- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs
+++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs
@@ -352,9 +352,18 @@ impl ChainApi for TestApi {
 	fn validate_transaction(
 		&self,
 		at: <Self::Block as BlockT>::Hash,
-		_source: TransactionSource,
+		source: TransactionSource,
 		uxt: Arc<<Self::Block as BlockT>::Extrinsic>,
 	) -> Self::ValidationFuture {
+		ready(self.validate_transaction_blocking(at, source, uxt))
+	}
+
+	fn validate_transaction_blocking(
+		&self,
+		at: <Self::Block as BlockT>::Hash,
+		_source: TransactionSource,
+		uxt: Arc<<Self::Block as BlockT>::Extrinsic>,
+	) -> Result<TransactionValidity, Error> {
 		let uxt = (*uxt).clone();
 		self.validation_requests.write().push(uxt.clone());
 		let block_number;
@@ -374,16 +383,12 @@ impl ChainApi for TestApi {
 				// the transaction. (This is not required for this test function, but in real
 				// environment it would fail because of this).
 				if !found_best {
-					return ready(Ok(Err(TransactionValidityError::Invalid(
-						InvalidTransaction::Custom(1),
-					))))
+					return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(1))))
 				}
 			},
 			Ok(None) =>
-				return ready(Ok(Err(TransactionValidityError::Invalid(
-					InvalidTransaction::Custom(2),
-				)))),
-			Err(e) => return ready(Err(e)),
+				return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(2)))),
+			Err(e) => return Err(e),
 		}
 
 		let (requires, provides) = if let Ok(transfer) = TransferData::try_from(&uxt) {
@@ -423,7 +428,7 @@ impl ChainApi for TestApi {
 
 			if self.enable_stale_check && transfer.nonce < chain_nonce {
 				log::info!("test_api::validate_transaction: invalid_transaction(stale)....");
-				return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Stale))))
+				return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)))
 			}
 
 			(requires, provides)
@@ -433,7 +438,7 @@ impl ChainApi for TestApi {
 
 		if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) {
 			log::info!("test_api::validate_transaction: invalid_transaction....");
-			return ready(Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0)))))
+			return Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0))))
 		}
 
 		let priority = self.chain.read().priorities.get(&self.hash_and_length(&uxt).0).cloned();
@@ -447,16 +452,7 @@ impl ChainApi for TestApi {
 
 		(self.valid_modifier.read())(&mut validity);
 
-		ready(Ok(Ok(validity)))
-	}
-
-	fn validate_transaction_blocking(
-		&self,
-		_at: <Self::Block as BlockT>::Hash,
-		_source: TransactionSource,
-		_uxt: Arc<<Self::Block as BlockT>::Extrinsic>,
-	) -> Result<TransactionValidity, Error> {
-		unimplemented!();
+		Ok(Ok(validity))
 	}
 
 	fn block_id_to_number(
-- 
GitLab


From 105c5b94f5d3bf394a3ddf1d10ab0932ce93181b Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Tue, 14 Jan 2025 15:30:05 +0200
Subject: [PATCH 048/116] litep2p: Suffix litep2p to the identify agent version
 for visibility (#7133)

This PR adds the `(litep2p)` suffix to the agent version (user agent) of
the identify protocol.

The change is needed to gain visibility into network backends and
determine exactly the number of validators that are running litep2p.
Using tools like subp2p-explorer, we can determine if the validators are
running litep2p nodes.

This reflects on the identify protocol:

```
info=Identify {
  protocol_version: Some("/substrate/1.0"),
  agent_version: Some("polkadot-parachain/v1.17.0-967989c5d94 (kusama-node-name-01) (litep2p)")
  ...
}
```

cc @paritytech/networking

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
---
 prdoc/pr_7133.prdoc                               | 15 +++++++++++++++
 substrate/client/network/src/litep2p/discovery.rs |  2 +-
 2 files changed, 16 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_7133.prdoc

diff --git a/prdoc/pr_7133.prdoc b/prdoc/pr_7133.prdoc
new file mode 100644
index 00000000000..ca0d2bb0bd4
--- /dev/null
+++ b/prdoc/pr_7133.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Suffix litep2p to the identify agent version for visibility
+
+doc:
+  - audience: [Node Dev, Node Operator]
+    description: |
+      This PR adds the `(litep2p)` suffix to the agent version (user agent) of the identify protocol.
+      The change is needed to gain visibility into network backends and determine exactly the number of validators that are running litep2p.
+      Using tools like subp2p-explorer, we can determine if the validators are running litep2p nodes.
+
+crates:
+- name: sc-network
+  bump: patch
diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs
index 2bea2e5a80d..b55df374f60 100644
--- a/substrate/client/network/src/litep2p/discovery.rs
+++ b/substrate/client/network/src/litep2p/discovery.rs
@@ -254,7 +254,7 @@ impl Discovery {
 		_peerstore_handle: Arc<dyn PeerStoreProvider>,
 	) -> (Self, PingConfig, IdentifyConfig, KademliaConfig, Option<MdnsConfig>) {
 		let (ping_config, ping_event_stream) = PingConfig::default();
-		let user_agent = format!("{} ({})", config.client_version, config.node_name);
+		let user_agent = format!("{} ({}) (litep2p)", config.client_version, config.node_name);
 
 		let (identify_config, identify_event_stream) =
 			IdentifyConfig::new("/substrate/1.0".to_string(), Some(user_agent));
-- 
GitLab


From 023763da2043333c3524bd7f12ac6c7b2d084b39 Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Tue, 14 Jan 2025 14:41:24 +0100
Subject: [PATCH 049/116] [pallet-revive-eth-rpc] persist eth transaction hash
 (#6836)

Add an option to persist EVM transaction hash to a SQL db.
This should make it possible to run a full archive ETH RPC node
(assuming the substrate node is also a full archive node)

Some queries such as eth_getTransactionByHash,
eth_getBlockTransactionCountByHash, and others need to work with
transaction hash indexes, which are not stored in Substrate and need to
be stored by the eth-rpc proxy.

The refactoring breaks down the Client into a `BlockInfoProvider` and a
`ReceiptProvider`:
- BlockInfoProvider does not need any persisted data, as we can fetch
all block info from the source substrate chain
- ReceiptProvider comes in two flavors:
  - An in-memory cache implementation - This is the one we had so far.
- A DB implementation - This one persists rows with the block_hash, the
transaction_index and the transaction_hash, so that we can later fetch
the block and extrinsic for that receipt and reconstruct the ReceiptInfo
object.

This PR also adds a new binary, eth-indexer, that iterates over past and
new blocks and writes the receipt hashes to the DB using the new
ReceiptProvider.

---------

Co-authored-by: GitHub Action <action@github.com>
Co-authored-by: command-bot <>
---
 .cargo/config.toml                            |   1 +
 .github/workflows/build-publish-eth-rpc.yml   |  37 +-
 Cargo.lock                                    | 476 +++++++++++++--
 prdoc/pr_6836.prdoc                           |  17 +
 ...c1135227c1150f2c5083d1c7c6086b717ada0.json |  12 +
 ...68c427245f94b80d37ec3aef04cd96fb36298.json |  20 +
 ...332be50096d4e37be04ed8b6f46ac5c242043.json |  26 +
 substrate/frame/revive/rpc/Cargo.toml         |  11 +
 .../rpc/dockerfiles/eth-indexer/Dockerfile    |  28 +
 .../rpc/{ => dockerfiles/eth-rpc}/Dockerfile  |   0
 .../frame/revive/rpc/examples/js/bun.lockb    | Bin 40649 -> 46862 bytes
 .../frame/revive/rpc/examples/js/package.json |  14 +-
 .../rpc/examples/js/src/build-contracts.ts    |   7 +-
 .../rpc/examples/js/src/geth-diff.test.ts     |  66 +-
 .../frame/revive/rpc/examples/js/src/lib.ts   |   1 -
 .../revive/rpc/examples/js/src/piggy-bank.ts  |   4 +-
 .../revive/rpc/examples/js/src/spammer.ts     | 104 ++++
 .../js/src/{geth-diff-setup.ts => util.ts}    |  74 +--
 .../rpc/examples/westend_local_network.toml   |   8 +-
 ...241205165418_create_transaction_hashes.sql |  15 +
 .../revive/rpc/src/block_info_provider.rs     | 250 ++++++++
 substrate/frame/revive/rpc/src/cli.rs         |  61 +-
 substrate/frame/revive/rpc/src/client.rs      | 571 ++++++++----------
 substrate/frame/revive/rpc/src/eth-indexer.rs |  88 +++
 substrate/frame/revive/rpc/src/lib.rs         |  27 +-
 .../frame/revive/rpc/src/receipt_provider.rs  | 240 ++++++++
 .../revive/rpc/src/receipt_provider/cache.rs  | 148 +++++
 .../revive/rpc/src/receipt_provider/db.rs     | 216 +++++++
 substrate/frame/revive/rpc/src/rpc_health.rs  |   9 +
 .../frame/revive/rpc/src/rpc_methods_gen.rs   |   4 +
 .../frame/revive/src/evm/api/rpc_types.rs     |  12 +-
 .../frame/revive/src/evm/api/rpc_types_gen.rs |  10 +-
 substrate/frame/revive/src/wasm/mod.rs        |   5 +-
 33 files changed, 2090 insertions(+), 472 deletions(-)
 create mode 100644 prdoc/pr_6836.prdoc
 create mode 100644 substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json
 create mode 100644 substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json
 create mode 100644 substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json
 create mode 100644 substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile
 rename substrate/frame/revive/rpc/{ => dockerfiles/eth-rpc}/Dockerfile (100%)
 create mode 100644 substrate/frame/revive/rpc/examples/js/src/spammer.ts
 rename substrate/frame/revive/rpc/examples/js/src/{geth-diff-setup.ts => util.ts} (62%)
 create mode 100644 substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql
 create mode 100644 substrate/frame/revive/rpc/src/block_info_provider.rs
 create mode 100644 substrate/frame/revive/rpc/src/eth-indexer.rs
 create mode 100644 substrate/frame/revive/rpc/src/receipt_provider.rs
 create mode 100644 substrate/frame/revive/rpc/src/receipt_provider/cache.rs
 create mode 100644 substrate/frame/revive/rpc/src/receipt_provider/db.rs

diff --git a/.cargo/config.toml b/.cargo/config.toml
index 68a0d7b552d..8573f582e25 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -9,6 +9,7 @@ rustdocflags = [
 CC_x86_64_unknown_linux_musl = { value = ".cargo/musl-gcc", force = true, relative = true }
 CXX_x86_64_unknown_linux_musl = { value = ".cargo/musl-g++", force = true, relative = true }
 CARGO_WORKSPACE_ROOT_DIR = { value = "", relative = true }
+SQLX_OFFLINE = "true"
 
 [net]
 retry = 5
diff --git a/.github/workflows/build-publish-eth-rpc.yml b/.github/workflows/build-publish-eth-rpc.yml
index 3aa1624096d..a98b3881a14 100644
--- a/.github/workflows/build-publish-eth-rpc.yml
+++ b/.github/workflows/build-publish-eth-rpc.yml
@@ -12,7 +12,8 @@ concurrency:
   cancel-in-progress: true
 
 env:
-  IMAGE_NAME: "docker.io/paritypr/eth-rpc"
+  ETH_RPC_IMAGE_NAME: "docker.io/paritypr/eth-rpc"
+  ETH_INDEXER_IMAGE_NAME: "docker.io/paritypr/eth-indexer"
 
 jobs:
   set-variables:
@@ -34,7 +35,7 @@ jobs:
           echo "set VERSION=${VERSION}"
 
   build_docker:
-    name: Build docker image
+    name: Build docker images
     runs-on: parity-large
     needs: [set-variables]
     env:
@@ -43,17 +44,26 @@ jobs:
       - name: Check out the repo
         uses: actions/checkout@v4
 
-      - name: Build Docker image
+      - name: Build eth-rpc Docker image
         uses: docker/build-push-action@v6
         with:
           context: .
-          file: ./substrate/frame/revive/rpc/Dockerfile
+          file: ./substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile
           push: false
           tags: |
-            ${{ env.IMAGE_NAME }}:${{ env.VERSION }}
+            ${{ env.ETH_RPC_IMAGE_NAME }}:${{ env.VERSION }}
+
+      - name: Build eth-indexer Docker image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: ./substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile
+          push: false
+          tags: |
+            ${{ env.ETH_INDEXER_IMAGE_NAME }}:${{ env.VERSION }}
 
   build_push_docker:
-    name: Build and push docker image
+    name: Build and push docker images
     runs-on: parity-large
     if: github.ref == 'refs/heads/master'
     needs: [set-variables]
@@ -69,11 +79,20 @@ jobs:
           username: ${{ secrets.PARITYPR_DOCKERHUB_USERNAME }}
           password: ${{ secrets.PARITYPR_DOCKERHUB_PASSWORD }}
 
-      - name: Build Docker image
+      - name: Build eth-rpc Docker image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: ./substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile
+          push: true
+          tags: |
+            ${{ env.ETH_RPC_IMAGE_NAME }}:${{ env.VERSION }}
+
+      - name: Build eth-indexer Docker image
         uses: docker/build-push-action@v6
         with:
           context: .
-          file: ./substrate/frame/revive/rpc/Dockerfile
+          file: ./substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile
           push: true
           tags: |
-            ${{ env.IMAGE_NAME }}:${{ env.VERSION }}
+            ${{ env.ETH_INDEXER_IMAGE_NAME }}:${{ env.VERSION }}
diff --git a/Cargo.lock b/Cargo.lock
index cfb805fbe84..3eab84d5ed1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1396,7 +1396,7 @@ dependencies = [
  "futures-lite 2.3.0",
  "parking",
  "polling 3.4.0",
- "rustix 0.38.21",
+ "rustix 0.38.42",
  "slab",
  "tracing",
  "windows-sys 0.52.0",
@@ -1478,7 +1478,7 @@ dependencies = [
  "cfg-if",
  "event-listener 5.3.1",
  "futures-lite 2.3.0",
- "rustix 0.38.21",
+ "rustix 0.38.42",
  "tracing",
 ]
 
@@ -1494,7 +1494,7 @@ dependencies = [
  "cfg-if",
  "futures-core",
  "futures-io",
- "rustix 0.38.21",
+ "rustix 0.38.42",
  "signal-hook-registry",
  "slab",
  "windows-sys 0.52.0",
@@ -1592,6 +1592,15 @@ dependencies = [
  "pin-project-lite",
 ]
 
+[[package]]
+name = "atoi"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "atomic-take"
 version = "1.1.0"
@@ -1880,6 +1889,9 @@ name = "bitflags"
 version = "2.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+dependencies = [
+ "serde",
+]
 
 [[package]]
 name = "bitvec"
@@ -4391,6 +4403,21 @@ dependencies = [
  "wasmtime-types",
 ]
 
+[[package]]
+name = "crc"
+version = "3.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636"
+dependencies = [
+ "crc-catalog",
+]
+
+[[package]]
+name = "crc-catalog"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
+
 [[package]]
 name = "crc32fast"
 version = "1.3.2"
@@ -5945,6 +5972,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
 dependencies = [
  "const-oid",
+ "pem-rfc7468",
  "zeroize",
 ]
 
@@ -6226,6 +6254,12 @@ dependencies = [
  "litrs",
 ]
 
+[[package]]
+name = "dotenvy"
+version = "0.15.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
+
 [[package]]
 name = "downcast"
 version = "0.11.0"
@@ -6351,6 +6385,9 @@ name = "either"
 version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
+dependencies = [
+ "serde",
+]
 
 [[package]]
 name = "elliptic-curve"
@@ -6559,23 +6596,23 @@ dependencies = [
 
 [[package]]
 name = "errno"
-version = "0.3.2"
+version = "0.3.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f"
+checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
 dependencies = [
- "errno-dragonfly",
  "libc",
- "windows-sys 0.48.0",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
-name = "errno-dragonfly"
-version = "0.1.2"
+name = "etcetera"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943"
 dependencies = [
- "cc",
- "libc",
+ "cfg-if",
+ "home",
+ "windows-sys 0.48.0",
 ]
 
 [[package]]
@@ -6772,9 +6809,9 @@ dependencies = [
 
 [[package]]
 name = "fastrand"
-version = "2.1.0"
+version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a"
+checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
 
 [[package]]
 name = "fastrlp"
@@ -6989,6 +7026,17 @@ dependencies = [
  "num-traits",
 ]
 
+[[package]]
+name = "flume"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "spin 0.9.8",
+]
+
 [[package]]
 name = "fnv"
 version = "1.0.7"
@@ -7837,7 +7885,7 @@ version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7"
 dependencies = [
- "rustix 0.38.21",
+ "rustix 0.38.42",
  "windows-sys 0.48.0",
 ]
 
@@ -7906,6 +7954,17 @@ dependencies = [
  "num_cpus",
 ]
 
+[[package]]
+name = "futures-intrusive"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f"
+dependencies = [
+ "futures-core",
+ "lock_api",
+ "parking_lot 0.12.3",
+]
+
 [[package]]
 name = "futures-io"
 version = "0.3.31"
@@ -7933,7 +7992,7 @@ version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5"
 dependencies = [
- "fastrand 2.1.0",
+ "fastrand 2.3.0",
  "futures-core",
  "futures-io",
  "parking",
@@ -8369,6 +8428,15 @@ dependencies = [
  "hashbrown 0.14.5",
 ]
 
+[[package]]
+name = "hashlink"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af"
+dependencies = [
+ "hashbrown 0.14.5",
+]
+
 [[package]]
 name = "heck"
 version = "0.3.3"
@@ -9100,7 +9168,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
 dependencies = [
  "hermit-abi 0.3.9",
- "rustix 0.38.21",
+ "rustix 0.38.42",
  "windows-sys 0.48.0",
 ]
 
@@ -9701,6 +9769,9 @@ name = "lazy_static"
 version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+dependencies = [
+ "spin 0.9.8",
+]
 
 [[package]]
 name = "lazycell"
@@ -9716,9 +9787,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
 
 [[package]]
 name = "libc"
-version = "0.2.158"
+version = "0.2.169"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
 
 [[package]]
 name = "libflate"
@@ -10264,6 +10335,17 @@ dependencies = [
  "libsecp256k1-core",
 ]
 
+[[package]]
+name = "libsqlite3-sys"
+version = "0.30.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149"
+dependencies = [
+ "cc",
+ "pkg-config",
+ "vcpkg",
+]
+
 [[package]]
 name = "libz-sys"
 version = "1.1.12"
@@ -10323,9 +10405,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
 
 [[package]]
 name = "linux-raw-sys"
-version = "0.4.10"
+version = "0.4.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f"
+checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
 
 [[package]]
 name = "lioness"
@@ -10607,6 +10689,16 @@ dependencies = [
  "rawpointer",
 ]
 
+[[package]]
+name = "md-5"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf"
+dependencies = [
+ "cfg-if",
+ "digest 0.10.7",
+]
+
 [[package]]
 name = "memchr"
 version = "2.7.4"
@@ -10782,7 +10874,7 @@ dependencies = [
  "c2-chacha",
  "curve25519-dalek 4.1.3",
  "either",
- "hashlink",
+ "hashlink 0.8.4",
  "lioness",
  "log",
  "parking_lot 0.12.3",
@@ -11453,6 +11545,23 @@ dependencies = [
  "num-traits",
 ]
 
+[[package]]
+name = "num-bigint-dig"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
+dependencies = [
+ "byteorder",
+ "lazy_static",
+ "libm",
+ "num-integer",
+ "num-iter",
+ "num-traits",
+ "rand",
+ "smallvec",
+ "zeroize",
+]
+
 [[package]]
 name = "num-complex"
 version = "0.4.4"
@@ -14809,9 +14918,11 @@ dependencies = [
  "sc-rpc",
  "sc-rpc-api",
  "sc-service",
+ "sp-arithmetic 23.0.0",
  "sp-core 28.0.0",
  "sp-crypto-hashing 0.1.0",
  "sp-weights 27.0.0",
+ "sqlx",
  "static_init",
  "substrate-cli-test-utils",
  "substrate-prometheus-endpoint",
@@ -16516,6 +16627,15 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "pem-rfc7468"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412"
+dependencies = [
+ "base64ct",
+]
+
 [[package]]
 name = "penpal-emulated-chain"
 version = "0.0.0"
@@ -16890,6 +17010,17 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
 
+[[package]]
+name = "pkcs1"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
+dependencies = [
+ "der",
+ "pkcs8",
+ "spki",
+]
+
 [[package]]
 name = "pkcs8"
 version = "0.10.2"
@@ -20030,7 +20161,7 @@ dependencies = [
  "cfg-if",
  "concurrent-queue",
  "pin-project-lite",
- "rustix 0.38.21",
+ "rustix 0.38.42",
  "tracing",
  "windows-sys 0.52.0",
 ]
@@ -20338,7 +20469,7 @@ dependencies = [
  "hex",
  "lazy_static",
  "procfs-core",
- "rustix 0.38.21",
+ "rustix 0.38.42",
 ]
 
 [[package]]
@@ -20871,11 +21002,11 @@ dependencies = [
 
 [[package]]
 name = "redox_syscall"
-version = "0.4.1"
+version = "0.5.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa"
+checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834"
 dependencies = [
- "bitflags 1.3.2",
+ "bitflags 2.6.0",
 ]
 
 [[package]]
@@ -21533,6 +21664,26 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "rsa"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af6c4b23d99685a1408194da11270ef8e9809aff951cc70ec9b17350b087e474"
+dependencies = [
+ "const-oid",
+ "digest 0.10.7",
+ "num-bigint-dig",
+ "num-integer",
+ "num-traits",
+ "pkcs1",
+ "pkcs8",
+ "rand_core 0.6.4",
+ "signature",
+ "spki",
+ "subtle 2.5.0",
+ "zeroize",
+]
+
 [[package]]
 name = "rstest"
 version = "0.18.2"
@@ -21707,15 +21858,15 @@ dependencies = [
 
 [[package]]
 name = "rustix"
-version = "0.38.21"
+version = "0.38.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3"
+checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85"
 dependencies = [
  "bitflags 2.6.0",
  "errno",
  "libc",
- "linux-raw-sys 0.4.10",
- "windows-sys 0.48.0",
+ "linux-raw-sys 0.4.14",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
@@ -24439,6 +24590,9 @@ name = "smallvec"
 version = "1.13.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
+dependencies = [
+ "serde",
+]
 
 [[package]]
 name = "smol"
@@ -27690,6 +27844,9 @@ name = "spin"
 version = "0.9.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+dependencies = [
+ "lock_api",
+]
 
 [[package]]
 name = "spinners"
@@ -27712,6 +27869,210 @@ dependencies = [
  "der",
 ]
 
+[[package]]
+name = "sqlformat"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790"
+dependencies = [
+ "nom",
+ "unicode_categories",
+]
+
+[[package]]
+name = "sqlx"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e"
+dependencies = [
+ "sqlx-core",
+ "sqlx-macros",
+ "sqlx-mysql",
+ "sqlx-postgres",
+ "sqlx-sqlite",
+]
+
+[[package]]
+name = "sqlx-core"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e"
+dependencies = [
+ "atoi",
+ "byteorder",
+ "bytes",
+ "crc",
+ "crossbeam-queue",
+ "either",
+ "event-listener 5.3.1",
+ "futures-channel",
+ "futures-core",
+ "futures-intrusive",
+ "futures-io",
+ "futures-util",
+ "hashbrown 0.14.5",
+ "hashlink 0.9.1",
+ "hex",
+ "indexmap 2.7.0",
+ "log",
+ "memchr",
+ "once_cell",
+ "paste",
+ "percent-encoding",
+ "serde",
+ "serde_json",
+ "sha2 0.10.8",
+ "smallvec",
+ "sqlformat",
+ "thiserror",
+ "tokio",
+ "tokio-stream",
+ "tracing",
+ "url",
+]
+
+[[package]]
+name = "sqlx-macros"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657"
+dependencies = [
+ "proc-macro2 1.0.86",
+ "quote 1.0.37",
+ "sqlx-core",
+ "sqlx-macros-core",
+ "syn 2.0.87",
+]
+
+[[package]]
+name = "sqlx-macros-core"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5"
+dependencies = [
+ "dotenvy",
+ "either",
+ "heck 0.5.0",
+ "hex",
+ "once_cell",
+ "proc-macro2 1.0.86",
+ "quote 1.0.37",
+ "serde",
+ "serde_json",
+ "sha2 0.10.8",
+ "sqlx-core",
+ "sqlx-mysql",
+ "sqlx-postgres",
+ "sqlx-sqlite",
+ "syn 2.0.87",
+ "tempfile",
+ "tokio",
+ "url",
+]
+
+[[package]]
+name = "sqlx-mysql"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a"
+dependencies = [
+ "atoi",
+ "base64 0.22.1",
+ "bitflags 2.6.0",
+ "byteorder",
+ "bytes",
+ "crc",
+ "digest 0.10.7",
+ "dotenvy",
+ "either",
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-util",
+ "generic-array 0.14.7",
+ "hex",
+ "hkdf",
+ "hmac 0.12.1",
+ "itoa",
+ "log",
+ "md-5",
+ "memchr",
+ "once_cell",
+ "percent-encoding",
+ "rand",
+ "rsa",
+ "serde",
+ "sha1",
+ "sha2 0.10.8",
+ "smallvec",
+ "sqlx-core",
+ "stringprep",
+ "thiserror",
+ "tracing",
+ "whoami",
+]
+
+[[package]]
+name = "sqlx-postgres"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8"
+dependencies = [
+ "atoi",
+ "base64 0.22.1",
+ "bitflags 2.6.0",
+ "byteorder",
+ "crc",
+ "dotenvy",
+ "etcetera",
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-util",
+ "hex",
+ "hkdf",
+ "hmac 0.12.1",
+ "home",
+ "itoa",
+ "log",
+ "md-5",
+ "memchr",
+ "once_cell",
+ "rand",
+ "serde",
+ "serde_json",
+ "sha2 0.10.8",
+ "smallvec",
+ "sqlx-core",
+ "stringprep",
+ "thiserror",
+ "tracing",
+ "whoami",
+]
+
+[[package]]
+name = "sqlx-sqlite"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680"
+dependencies = [
+ "atoi",
+ "flume",
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-intrusive",
+ "futures-util",
+ "libsqlite3-sys",
+ "log",
+ "percent-encoding",
+ "serde",
+ "serde_urlencoded",
+ "sqlx-core",
+ "tracing",
+ "url",
+]
+
 [[package]]
 name = "ss58-registry"
 version = "1.43.0"
@@ -28039,6 +28400,17 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "stringprep"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+ "unicode-properties",
+]
+
 [[package]]
 name = "strsim"
 version = "0.8.0"
@@ -29004,15 +29376,15 @@ checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a"
 
 [[package]]
 name = "tempfile"
-version = "3.8.1"
+version = "3.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5"
+checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c"
 dependencies = [
  "cfg-if",
- "fastrand 2.1.0",
- "redox_syscall 0.4.1",
- "rustix 0.38.21",
- "windows-sys 0.48.0",
+ "fastrand 2.3.0",
+ "once_cell",
+ "rustix 0.38.42",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
@@ -29041,7 +29413,7 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7"
 dependencies = [
- "rustix 0.38.21",
+ "rustix 0.38.42",
  "windows-sys 0.48.0",
 ]
 
@@ -29992,6 +30364,12 @@ dependencies = [
  "tinyvec",
 ]
 
+[[package]]
+name = "unicode-properties"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0"
+
 [[package]]
 name = "unicode-segmentation"
 version = "1.11.0"
@@ -30016,6 +30394,12 @@ version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
 
+[[package]]
+name = "unicode_categories"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
+
 [[package]]
 name = "universal-hash"
 version = "0.5.1"
@@ -30259,6 +30643,12 @@ version = "0.11.0+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 
+[[package]]
+name = "wasite"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b"
+
 [[package]]
 name = "wasm-bindgen"
 version = "0.2.95"
@@ -30998,6 +31388,16 @@ dependencies = [
  "westend-emulated-chain",
 ]
 
+[[package]]
+name = "whoami"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d"
+dependencies = [
+ "redox_syscall 0.5.8",
+ "wasite",
+]
+
 [[package]]
 name = "wide"
 version = "0.7.11"
diff --git a/prdoc/pr_6836.prdoc b/prdoc/pr_6836.prdoc
new file mode 100644
index 00000000000..1de081bbaa4
--- /dev/null
+++ b/prdoc/pr_6836.prdoc
@@ -0,0 +1,17 @@
+title: '[pallet-revive-eth-rpc] persist eth transaction hash'
+doc:
+- audience: Runtime Dev
+  description: |-
+    Add an option to persist EVM transaction hash to a SQL db.
+    This makes it possible to run a full archive ETH RPC node (assuming the substrate node is also a full archive node).
+
+    Some queries such as eth_getTransactionByHash, eth_getBlockTransactionCountByHash, and others need to work with a transaction hash index, which is not available in Substrate and needs to be stored by the eth-rpc proxy.
+
+    The refactoring breaks down the Client into a `BlockInfoProvider` and a `ReceiptProvider`
+    - BlockInfoProvider does not need any persisted data, as we can fetch all block info from the source substrate chain
+    - ReceiptProvider comes in two flavors:
+      - An in-memory cache implementation - this is the one we had so far.
+      - A DB implementation - this one persists rows with the block_hash, the transaction_index and the transaction_hash, so that we can later fetch the block and extrinsic for that receipt and reconstruct the ReceiptInfo object.
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json b/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json
new file mode 100644
index 00000000000..01627614490
--- /dev/null
+++ b/substrate/frame/revive/rpc/.sqlx/query-027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0.json
@@ -0,0 +1,12 @@
+{
+  "db_name": "SQLite",
+  "query": "\n\t\t\t\tINSERT INTO transaction_hashes (transaction_hash, block_hash, transaction_index)\n\t\t\t\tVALUES ($1, $2, $3)\n\n\t\t\t\tON CONFLICT(transaction_hash) DO UPDATE SET\n\t\t\t\tblock_hash = EXCLUDED.block_hash,\n\t\t\t\ttransaction_index = EXCLUDED.transaction_index\n\t\t\t\t",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Right": 3
+    },
+    "nullable": []
+  },
+  "hash": "027a434a38822c2ba4439e8f9f9c1135227c1150f2c5083d1c7c6086b717ada0"
+}
diff --git a/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json b/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json
new file mode 100644
index 00000000000..507564cd05c
--- /dev/null
+++ b/substrate/frame/revive/rpc/.sqlx/query-2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298.json
@@ -0,0 +1,20 @@
+{
+  "db_name": "SQLite",
+  "query": "\n            SELECT COUNT(*) as count\n            FROM transaction_hashes\n            WHERE block_hash = $1\n            ",
+  "describe": {
+    "columns": [
+      {
+        "name": "count",
+        "ordinal": 0,
+        "type_info": "Integer"
+      }
+    ],
+    "parameters": {
+      "Right": 1
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "2348bd412ca114197996e4395fd68c427245f94b80d37ec3aef04cd96fb36298"
+}
diff --git a/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json b/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json
new file mode 100644
index 00000000000..2443035c433
--- /dev/null
+++ b/substrate/frame/revive/rpc/.sqlx/query-29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043.json
@@ -0,0 +1,26 @@
+{
+  "db_name": "SQLite",
+  "query": "\n\t\t\tSELECT block_hash, transaction_index\n\t\t\tFROM transaction_hashes\n\t\t\tWHERE transaction_hash = $1\n\t\t\t",
+  "describe": {
+    "columns": [
+      {
+        "name": "block_hash",
+        "ordinal": 0,
+        "type_info": "Text"
+      },
+      {
+        "name": "transaction_index",
+        "ordinal": 1,
+        "type_info": "Integer"
+      }
+    ],
+    "parameters": {
+      "Right": 1
+    },
+    "nullable": [
+      false,
+      false
+    ]
+  },
+  "hash": "29af64347f700919dc2ee12463f332be50096d4e37be04ed8b6f46ac5c242043"
+}
diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml
index cfaaa102fc3..9d822f5ff8e 100644
--- a/substrate/frame/revive/rpc/Cargo.toml
+++ b/substrate/frame/revive/rpc/Cargo.toml
@@ -7,11 +7,16 @@ license = "Apache-2.0"
 homepage.workspace = true
 repository.workspace = true
 description = "An Ethereum JSON-RPC server for pallet-revive."
+default-run = "eth-rpc"
 
 [[bin]]
 name = "eth-rpc"
 path = "src/main.rs"
 
+[[bin]]
+name = "eth-indexer"
+path = "src/eth-indexer.rs"
+
 [[example]]
 name = "deploy"
 path = "examples/rust/deploy.rs"
@@ -53,9 +58,15 @@ sc-cli = { workspace = true, default-features = true }
 sc-rpc = { workspace = true, default-features = true }
 sc-rpc-api = { workspace = true, default-features = true }
 sc-service = { workspace = true, default-features = true }
+sp-arithmetic = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
 sp-crypto-hashing = { workspace = true }
 sp-weights = { workspace = true, default-features = true }
+sqlx = { version = "0.8.2", features = [
+	"macros",
+	"runtime-tokio",
+	"sqlite",
+] }
 subxt = { workspace = true, default-features = true, features = ["reconnecting-rpc-client"] }
 subxt-signer = { workspace = true, optional = true, features = [
 	"unstable-eth",
diff --git a/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile b/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile
new file mode 100644
index 00000000000..77fa846a145
--- /dev/null
+++ b/substrate/frame/revive/rpc/dockerfiles/eth-indexer/Dockerfile
@@ -0,0 +1,28 @@
+FROM rust AS builder
+
+RUN apt-get update && \
+		DEBIAN_FRONTEND=noninteractive apt-get install -y \
+		protobuf-compiler \
+		clang libclang-dev
+
+WORKDIR /polkadot
+COPY . /polkadot
+RUN rustup component add rust-src
+RUN cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-indexer
+
+FROM docker.io/parity/base-bin:latest
+COPY --from=builder /polkadot/target/production/eth-indexer /usr/local/bin
+
+USER root
+RUN useradd -m -u 1001 -U -s /bin/sh -d /polkadot polkadot && \
+# unclutter and minimize the attack surface
+	rm -rf /usr/bin /usr/sbin && \
+# check if executable works in this container
+	/usr/local/bin/eth-indexer --help
+
+USER polkadot
+
+ENTRYPOINT ["/usr/local/bin/eth-indexer"]
+
+# We call the help by default
+CMD ["--help"]
diff --git a/substrate/frame/revive/rpc/Dockerfile b/substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile
similarity index 100%
rename from substrate/frame/revive/rpc/Dockerfile
rename to substrate/frame/revive/rpc/dockerfiles/eth-rpc/Dockerfile
diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb
index 67df5841e43fba141c7a146a1e4a8958b4c7a84c..39a1d0906b70e1c5cecc7df65976f4db77f41d17 100755
GIT binary patch
literal 46862
zcmeHwhgTF$(=XtP$fJOYV!#JX7|Gdy1Qo=HVpfo_$Rf#Iq5+8#1O+5YP;!zSBnjtK
z{bTpnvpcZstBCU6d%pV}Tbt>b?yl;tuCA)?sh&Ns&pBPAc8*phJ1dhj?4#3@%@G|F
zQ(~URsS=fraVhGgs5Hk67jqLMBO_i+pb_fj4w^CH7MkV`9<ii+uA>js<3nF5_asl#
zB=tw~;3F=*Mio5C6QMV#9hDIhwHG6^@2-@qdvN-xE@2d_OiB^y43#oTT&W0uHshL`
zB_~ALUp<LLnN6aWC))pyNZgd4L`AF8vQw2tDoskJ1`nk_xTXjM2-Lq$2cK$?+94`y
zQW9fCURqMBxEOJR!pLZ&l%q__P^yt?!4nB%g2qu3ALVXD6?kGL%21(PIs*v{@kp5#
zuT%rOj66+~sEP*QD5b_R0nhoZsjl5Lu|Ws;KM7!s8WK1f2Wchj8eMy~p>@&&wbXDx
ztjCjW0(X)sS(T*Hq~Rj^V#MXhk!U2kBo#S^TP@%Yq$D^zf|R+`op(rWms0u2!VTqS
z;O|b6c$zK2{9y+&RB1|)8>dlbD3jAPsVSPY7>&k=x`0a>QEEa;LXsSnoT7>~GE!=y
z(^ZMFMo+dF8QBN~VMs`0a!O7dk`tD4f@eW@sfFB)6v#xmTrH<)VJX$Wl&V`w<;$sL
zIdNZ31S28Q$3sr+lM@zlg6A!bjJ8S``<GG;OQ|<<iYDZgIwdhNJr!i$ZDh1psyMRC
z$jDYoeLzZjMzcuaKe(wGmw!bn3yFgwaTf`DkvJ)5*&uO1JmE<U%YG?Uhm@U2(buI^
zBT^Eq=klWnBqZEHNbC{Cy^xSb2Lq!WoK&?kElnlZUiZLspKdz*h;KvXOse9V`&J=I
z)Jo&LKp&}X9WC(q4$h;_Y$~O?oixEyW?r06i?-affoCb72I(85%B%DAiMm#qlf%8-
z7!t<C89TWC<gpVuT<%8=>0I`Zr}t8CmU;L+8c3w-;|l6|%z6Ks+@mQjQ*KaPd_~jr
z?liwe`(~&OvHh`MJoQj14=Np^js(+tb8hzDoRsCvL)3qa+fr;@`P1<r>kQi*YRcxu
z7z_GB1^h0WCbnPXuG^cb`XF~4a_82q^yv<DS@S|b02fDkP&fDO*}slHh0$jZ?&3NN
z-{h^^FI}Xzy*w34t(;5wp;iy}J*6V=2mD2^Z5QYhWN<&V+t3_Oa{~|H-#cpJmLRI4
zD(CxD;XspTd^e_?<~Q6Q20G9EoNzl%aE*edHmo<XrY0IjOFmN@ErjZ|p6mSm9c)tR
zJ&p1(=W;U@Q1@OQpvr4B$W@s<$o0ECsS&NJp{X!>!`&`innTs85KG*a5gMnlx&I&v
z6dQ@R<wcsm!`0l#6SwScqnC?0=FjNOVN@?R0`6~{4^XlBCZ6K4Gz(X*;!c|3LXQZa
zP%h&kH)`6Q<z>wcnZ9Av^KTq~<Wj^|p1VPfKk54sg~}>;BhO}GY%3|>hKkUV5^jAM
z#qR*~%VjglJI=M#q2{WmH1<$Kr6J(X3F<odGV+?;!Tl^0-CL^zXM!>}@Hl<tDr&RZ
z%Y{d{?^@zxYUf4dSJ6xW4O~z?QlC9c&6{n`aUa-xD1(b{_;KZN+r3-BMy-2XXqG>4
zU(9_j;VH_sdr6(QphQ2M;x7jhr~+eE61{`xH(J_R^VG4eN8F9+E!1usH&H>{1y`y=
zZ}T|ch=+n5xX;9YC(rV%>kTgRQgX?L>r{V%dy_cdY>VG<nl`2yE~gR&eTk3Uax#nB
zsV^sihs+LfwM!s<jHIuomL^`8QDzEbUQH9v0lN!b>D^`%%wM^O>e6_O+Rmb>b1IrS
zO}!qbN!$&3x6&fD(106#^Sz$PO(B-l6@ZE*YHB(RJ}9}k*^=A2%Jl@#^IM+Z`}~?Q
zcfACabI)!Fi&KOCrLo}>lr}2n3Ci0I{CT$bax!@Gi1%R{^l{@kuDrnA0f)?ZGUyx^
z@^l0>=6Fyu25phv(tG|!&Ghxa7Vh`qkr&r2tN^Ks#t$hTxo|HPZ8J^d>exgK^0)~m
z0T1zzKYa#AEzl%}><f>X@8+^YG-thI>mx9CBjx)=n9w}e@UR(wqECk?_r!53Qbu!g
zgv$*YJ5Dn&Y|2cJ(zi=4l*bb<_{$TnppvaTu>U8_M;#C*4cYW5fvZ)<LSC*Rz?6{!
z#JQ2Hkj&x%bfFQfid6*}09B)Skbodp6QX@JL59G!D1Aym5S?1TnyR`oG6XABTmk@N
zXVpOlaOryht0k`tfZpgsfR(lCAVc6heF%Tj37!W8-?h-uAC2*r0f>hEE*UaWX{iN&
z(gzv=1TVP>f{3f7rD(1QJc^W?K7`e_Lxyl*osp5F03enau)bCuWB~C1a6$kOHCKyW
zl$8O{S3n395aikLy92>9fN)u+f|!xN3*kHjfg~72!vrN@$gL_Yb)pXtoMkeIs{2+|
zf0+#21_&qRPKe4f>|X(r%N+v!JY+fwPLeZL3*a6A_{)6&Nq?J+)CdR?LkpDhqE)1S
z738M!dqB7-Qvrs)SXr$q$Pjjd2PHBJ^r9214Kjcm0B}xjfq3}4EucC;@Dvclbie{&
z)nZ<50=J{)(yYYd?6*h465}t)1p#ukQUyhqcqRaqBrOB3Gzihkv?wR^&XGF~oQ7TU
z1!mcRK?&T*9eD{TK7tZL*@|XhbXOH*0Gx|j0WvMIgj_XO$q<ae2TSTw@PnN3t9Hoc
zPU{1ZAO5Pc$d}7z0>Ej}1-a61Gt?p=NJNmWjo+1TCxB8C4Pp!gSFUzq$bEPW0RHkY
zh<^NT3+OW-NP0rl6kyS(RRy^T+z$v>MH}QNzuN{MkWb>qKW#!P062*j2mpeFzuO0D
z1q45t7sNoUHpqSEW<ZdrAWx;=9ZDbUI7v+qAVuZhB_j<3LV!#LQCrBk)r6%^%mRWm
zB}8rU^m{4uGiWjdFYKQ!X?#&w&iGkH`MF$<DgfcE=!8Hmo`SBc3Ni#P2ZS5?WLQ;o
z6p|tQ3%Z?<0f?FTyF(cY0FnhN#H(n(3>nE4gItaZ06`i`!HZ&7bhSZ-!0!P;GMxlJ
z3R+@lR~2LkC$MuV%}Vh=P~dl|5DNg3Aund5XaVf7RRy^PGzJKP@<52Ef-P4QWC%O~
z2ulpT+S&qx48ae!jwGRF3J88!O^^Y+0sx5$q7t@ne_Pe&z|OcTi14QXxEv6qN%^M`
zc47gtCIGQ(1wbQfg~0QG5V{sRUVxbsvK9dJZmkfwZmkghg)muDC*sx$fQHrzfv46A
z!4DI0O;n&afN)_g%)sSqg|HRcWlc@EyH)_y3JB-cM1}UXLU6!Kv^6y$VXXjYXsr;8
z@j}W|kWpw!q*oneC36&S+8(S002Knly|qB#mbJFQXOEH5&9yXv-U7m-wLsurKv1m(
zLKt?aqSgX{#sFbWov_6F)hBCdLIwaR*8+f>00ABgf6mY+@eX%Q0H_fV)<j00T5B6_
z;pOz2<`fM9!kx9yalO6XE{hN=Vm+~%AWN3Z_BsPvEF*;ND`)%)h+K{u06|(u2n$*W
zI5}sv5V#T$yaW}50WYH1e_TzFF<I`@qk;f%Y&Eq}Mh0*b0Hn=J7#_lVMEH`dD#!q+
z0uTa39|RpCDSvwz!0!OTLqHJADFFowyj2HLM?|q0g&iz)O!P$raWzJSJt75h75C80
zQV>_s4_PsZs~Ag&O6fV~x>O$1MY^}gRp2^;D1!b~aFAFA3ivLFBK*l!!0|>D?ZQf1
z3i?+89|I-@{i}F>3Q@G*4^i-AAfgE3D)NI6#q(f95mvg2@*#)<-X%oQo-2qVh^xrQ
z;!O%ax{Ca3x_ogJy!AkLFRlV^gzo-FS6CiK>AL*?AFqNJsaV&6|4$NP%%}u^#9&hy
z{zy;WA|V2agm@x?nJ)gFcYo*I|D^f(zkA;Cf+a7=Q&5_R|Ajw*XKW-x)(5T-zBPE;
z)^w2iu5-n~urN1nJi7ZXG_?v|S5;gd$t6#$;6>C-^U3j4q2j!Af&4WtdM^!M<S8{z
z(qI&IaV4CSOsLCMbJ+f1=vA73&dvS?ScSlmgJKJJujAQ`RC0|gc2d>;bt&;wM*|lV
zxXFu)spC40{u{n63vPyGRJs43`&6suPcB>SQq`95XzQgOZ|;QO+GiSx+{*n&qPd@X
zlr()RWxKTjQlWz6px`<`>h!<Oa{;^Ie>BMLa3E@-N*+GX1#ppWr8#&c<#F>p>OBG%
zuafomUQ&K4<)4A$<^a_?T%$t%Y`mS{P;J)X3-IrAbm0$pS2_49HWc87xzk26^Oh+s
zG_f2MS8r4C`Bb1Ogl}J!r`^q0z+DkU@8Vp7voqJb(8t3kRQ%N}5(U0^9itvIHFa#`
zZ|FRn7+ZMZI#=DceMCk6hiDOwOoeDpE_}Coe7NM0L9c~@B?kp{(@+qP?M<V?)EF+q
zP%S*;kvOh)-WaA%yJ6$Lo!=ejVJb(LXU^V%E7h0v)Dh)<_>M*V(??H^a>)s9L`&h;
zc=v81zu9&>h5L{B@&^Ny`lh)e%zfWO8orU`bN<kMF3t|jhObs*2z>B*xt<ChS#Ym+
z{4-v>!M*YDR@;6L?(82=^T<p1lI6YR9#0;nK39(*E}_O_Ztg*6sKkz|sqHp>GQcV9
zC^;yi&j0iJJiI&MCYrmB->35I6<*-d3;W=H`jHE1amRh`@}ugQmxtVV)Jweu3{-f_
z+TQIne@myy0DHI+_P{AIm%HIO*+c#CtE=C-hl@^9AIM*7fLUlpIVdcsEXUYk9~=zl
z9pI<e2B*yVmo{+AoaU)G>Nv?|S1`p&;E~_NJ-z_@#@lf_SZ`q)5Z3T0=kGWW1O~1@
z=gRHdxY7IwckQMj3upTFm}f2nWTgReys=ygicKndLle&1;PyWV@5|07m_2zslWoqQ
zZQy~|O0_mT2KFAI;b1tFjv`jVSFp>33NHZpIF*AxI*z(x2tJ%dn@e5cmf6XTFLErn
zbg$)6o=ppQ#1#e&5cYr^6g-{)>ha7W8a%)EA^ZVxEMfa8DoF;?!7R9U7SjhVjtYk}
z@hlZn#W7ndaP$*Xao{9%+HB&+<a?RxXynL=BinANxenzQHf3>hq!BexvjJZHmp}?7
zPwfo76cljT7T&8p$<aJ)!X4`{8)vNeJ$+2O>-H~?d&1$d)eU}#ZzzwxS@RqXT-xnJ
zqc;wpq{d8!FDu+I^VJHT3B>R<)8gwSAH^*j1FXWTm4iY%UQZ8afgg+L8;|pt2b@T!
zciU1IT!eYXJti4bvmzRk>8m^3iU&a8N}f*QO74Ch1E12bD(bq5`A|x=+<E+V63+&}
zb+U`U(r2o3G>YQ+oiPUZ^;b#?9=*+<?ty3CCegeTf8vhkJ~UwAMwO4QQK`*o7s#(k
z&X2{69eYk6?UFS%N{mSp7jrG<_y9ahOU<c)KV9MC%``^u{qVd3WA+WA?+g5V%c;fZ
ztO0)g)uJN#miuK)k{0@QF%g3Jn+1P(Og$&=Q=Ze|D_qXAMoO-B1mnHWO5jN&xZ~FF
z&;uVDhY$4vl~|!A=sMRV`yPbLXbb#5=O1|Q1gpGrzXg&_tTRBZzf@N2QW?{N4^_v~
z@F^?Myuo+h)-Z5X?LK}VNAolWYAmGlM+{K`Rl{|<?l=bhBYM^WpWa&d!%q2!bH&Mx
z5AV{*i48nq#f{cHbN%W~OxQZV-Bf6R8z%^sf&!z{ong=4xH~#3Jz_I8Tcp@=-FkZS
z8beWYi+iqf^UeLQA`b2WM^tjhK1=R-7(|m+ik$6NFg5GpBRhBU3ffS`BlrHH8a2G0
z3!uk_A;k>^DPDIi1%=UdYCjAY-{}kV)(uX|4NqQiZ7{b{O*+q<j&|mjXgDEv;9o(&
zMjE@}1dTY!Z`}FAUFvtFaqfX+=%?a+|Ipiqc3g8}FVF9Q+Gsc7DR(Y7V$fq@R>?s@
z6;@!;I;fX2jCKouOy{PXC%r=etuXm?ASgQz&S~BS{XBjOv#w70+WaN89i`?tU&yv;
z`$L;Cxyr$DLp<WQ=g@ARfa`J3&V%=;ArX-4_ZZ;Do1vwku#5`h59!e$&S(hD(YrhA
zF>fZQ(F+3o8-#P+hASti;#y4ldT8D8EKGWOw~M~|od=sOLfXBh1$%hnx8AsA>cd4;
zkzhd!$33||HEKJY-3{={+Xw==BGncGxy3RR%@$lVKqELqu+0RCN-9slY?<PLD?4^%
zaOX4XrIGuPxR{8%5EXwzNn<+?Q};<{*Bd!|A+e`13qP4|eFc7iWSyZ-56|x~i;W67
zC@!VDxWA+h?tU5`63p+p<JK<tNPpsqkYn4Ug011vK4XKfeZJ0}RB{@_QH>K`BQT&m
zwr||BgFdBU=FZSNS1#Sn9rjS84d$2;?RMP5MO?ALpv&^kh(NBO+(!>D&?vVZaF4u7
z#i#j03`FLKpqG9SPt_2R1N_!8lfT@ik1o{zczrM?=}5BknfvJ9XKwuG=t(6gFlvHI
zz0|smYeLWC9K;t4(g%~f++u)OEcfN05N3wYZnUl)Qlnunwb_ER;H_S6XYUZfZ-Ovo
z`!KnyPTaFT0i`gt=}<CkE$}+!QUO%y8ya>8o&_FGz(j(LG)48)ng&^$m(2|x(7=FP
zN__?`kT-!uD-<Wsq2*KD2i-iKNmHAt(V7MzC+83IM>`(g&$H*?1z&;=7vI|F66eWN
z0bq-12>l9ra~(X_=M=G@+MsmDp$z8uZ8*J257=;*${vAg!_*p{^}qo8F1hmpyQ9b1
zkSBgLex94T@&V1<gCgn)zOWx2?Xj>E>ZuF|4<N>7cp~v8hQ9G`7|f%9%dlp?o{2vc
z$@To@K3JfG8lkQS;ro1dJK8h=n)ISk5cUSR#ipGc6g+zoM-&cYW_AWr)m7__56oeE
zKHAN_&(6^3VQSk2YoyMbhF<fc@hLdtzk@pIyc=rr4B7>aI*<7~?}A|%#L<jU*La?@
zNqQb6tgX$y23W-=n;aC}m$sh1rEzBxPg=yn_%5@j(F@QIBZ6NcH7hZF-_ZEcjTzi`
zX6GeLI+%gc{P_{g#-=k^3bet6F#?a9AxC>}orZRu;U;6u$}hZ_#-GojZ40{$8i2PE
zO98wN*_eM6qJ<ayPMPfE_LG5EFjNf}sS#&$-dt9D?2qQ|B+3UDl!ZLb;+6y935TBI
zh)pe(-*w-3@eo>6v*`h~#shOL&Fy=^U(X$a_9{2PD)toRpx6T&Y-R_HtS*dJGY$Ar
zuVN>6yg)C?(_cV5)LL;3bbm>pJC`^?R}@j#CaAaZCk*4>3}*RTU;eo3rWy6dn!cpD
zbum=sbq554IY|rjE(*h6Yk*#CWy(Q;`uQ+>#$)_3&x&9QEb_$G!?%H_n`UFNW+}Ef
z!zCB!n>z8IRGJNj5UO^gvLiUC(Zp4^jNQTA4G!yF?sDw`h7j!c1ra~v^bS<_`*a?-
zK}810vFE=O6qqZ6RCbnnc+nw)-zy$cZz4_}J%>pAMg?JacfW=J7<+Z;-f1X{eeRi%
zSk!mgI)DrAQ0}ETYP0~8)$T?Q>ULv}ec2@}o7saGK*?fyZ-Cc+2`eU->vfv9QXB<e
zyyZf55>FhB+PuSO3#4+X!)wT`Npz}~ORolmP`&xSo7C`dJ<c^v#s$La|3Y0i>1`Bu
zztC{`&PUMsm0kqR700c&X>9*@-QA~;7RFWaA?U(}E%E&R290btz6Iu-cZX^9;SNm&
zpLe4nFc(%9D4BQo8NGW5-BN|)VvV*u9C{F^YCbsyMqvmV4qEb?+o9a{Fmyd;Mg=!K
z#Tvc*yY<h01LWfvV2JL0Jam*w&+sBvP0+dT0vx#cA<YD;!c8dmGWEdJEcAYM)R77z
zsNw`QMpBc%8w|E)v~BD$bneF;PkA`VGQ-v<@T??H8e#oZrx;NCfC1(i8oUIm_S%Hl
zB+^{iwG<l7gowtO92<JSoBJ=PVCuFWb-Z{X29x|<7FBNa2mKc@j8#0ff1d-5+n$20
z(Y=42Ik(ut&Kc*LJB}yO;etQ|yax^Nw%wu5vv<s48oa0c%cr>b9_59n^4rAY{LO>&
zA1huTqq3J=?|&i1iR;(hqM{?aBWdJxCKWmDzhwp!aoC4SE?dBmg@ijpg>hVQEu4D|
zP#)4piD_TPgO{l0!1mo#WKM++H2)--^Dt=(O|#C3Imz8Scr@o(VC*i+OLx8=3G0$(
z<LJ{(2<e5C)0^C!&MWv0%9Zi-x?6EkFf=+~w6=4b0a^zGv~M#|B_ArHgIkOq!LXWw
zky8yZ+(qrXoN2_;KFy430>dfKAvG%JHRYY%>5~M$n#u%EHLp*~MiU3PAZG)Ons2aG
zhdhjftQ_G|Q_E8ZSPvUuZ8v%e+i&pHEAH8TI|%I5x5wv&SjTx_?c7Oksmj3_w>2<`
zFxC2Cs(syp#aJ~|B93T=Q|s{;TxuU{DW)q|J7vL&Yv6h;V3IdEg5qr+M-7l4F+kpQ
zf^vgleC9!=w{Qc^+E8gi`q8^i;oO4m<1i@9_A$iDm%%O&#x<wc?WI{;BT%*#rbI8s
zVvgTab+{jNWK}HYei_egFo!7br@jQrg)sc)Zf4McqxubiiXEfb^%*qh3Hz;t3vYnj
zm2MVXvzKRICf&&5RwuX(q{6I(y;o}P1aw2^AA;-0FD0G}f{{IZ9CO$dLsW#dWtC?t
zbz5ayAK6NyFAT6A)5m&(+j!KTyXk!t=0?|{J%Itxk0rOLDH3XCjy}1gPXo98Ab%&f
zZh&AY#nHd{M62`9l3*EhaKHU6SOHB+E_9>ukgcFg6-3P2olr5O=9W)6*T(?&aedq<
zJo}I*s2nnC1_n&NNBI2*M;YDXMjtMWqUqB->Gq2Hwo-!y7WRFRt~C}Xu{`O)8Cq;`
zv~bA*9#*G3Qdk6_HE(b5$1_K%SQX9h?6NU72B=Tyqt>7&{r8|0rXW6Rk~f^+9lqa#
zX0~y~1zOz2mA9zJ3PSh;hH{$QZ8y@peF5C0;9lPdo_qlf`0g3c!a(ogi8K{t4h?OF
znrgWCoWH&_p(2d|W=DO@)PEKR;K(}8J^zdc+(VuwKtML6q;QjOI85Nr=zeM7&3ioW
z8N*fP-0cDDuOCc40RDRyOhdQ0)C#MHv784Nsmcy=ZW!(C^M%lUw~=NaQSEsH+)f6#
z7ogM@Azj~~2X8P_%R$w_v(K;-)fdbIZhmNxn!oI$zPlLbfn-R4X#oCg9glOS;|W{V
zYp{my<a`w^Ch(jCSDnTh5xmwJeGG>2IDOc#k*7$UX<Ocr6z?zPpojubRq`ke9uOkz
zOp^CS3-r0e%?Y<dyN+W~$6q37aw9FAgdOoQb{*xPi^lS#gp05kf0qs#j8guo@K<KL
zu#Wvgi>ES8u|g|%fT`6@qbCe7%Wp~rb_Gw^QX7qNVa7|IO!S0Yn1(G<Mm1p=#ZTDz
z`Q`@`r84Nw5s>H&lv86YOmE0uNP}Dx8a<ywWolm^o^ZdKy6x#jzvD*Seo(C5vNNKw
z9vC)cb62^?0<{FLi$^`F=LpDD#`EW>=;{3(+~}W8wb#+3>4+e~QaottgeAyaG*>uh
zLp@;;$Q37&vAXRzX>y&vDb1+k*#l>cUl|WOT;c-ftB_VLJYdjmH+|ff{LUz2Uz@sL
zg)|<dn&bD*_;6kLZ5sX;?f4Ao{^`PF4HrE<$4%#{*5zIX<mwQtsNV2QYCPZ{fnCg}
z-uv;OFd7r3l!i~7p~*e`MV!7|9%FZXl##o6R$(1z#GU)WW3z`(9mdS~x)Cb2#0Sf?
zNpHx*S}d*J(Z@5K4{_1WP%0VRoVq#V^ut52i{`j0gJxqQcf)vSya4_xy?V~a64HL0
z>Z8L9Fyj^fQc!q1QRS1E7@A9R=7Mml+6xWX!c(5f+;5TtIR|~{!et2nG@Gyuh;b78
z>9){2{{mqfh}}R#;g6ghxFT}hMM%{F|JPXI4T8NoZbA8#c^M$~TtbTx)?k_x1?)N$
z0Xd3ogVWrTrKE2G4{3CVr#<Ikj<o#)E~wv%wOz-4Xr*47c}%TbV}>?;qdtg{1|Ff&
z=&ZZ1xHT3HtG>f6_Ah)P$%~F+$D@NsW$a>yAMPDXLBTz{*S&g8pPVs^KN`byr+g*s
zlP(&?NpL#|^<kJs-RL;=+s4Aq?{vhI`Mo>;rRnwfAi`a&=UOlsIv!j`twv1r)+=6I
zuY}n7OwI08XTzU@@G50_w1p>u9Dp6tY1EvCFxKBP(bvxxZNO_)_Lx*vkRScryMyLk
zV^q*8@2+m$>$1lTYt07g$D5IC8jiU6>KIQxhWC&aRlI@`O!Y1fAVxnApN_$5<Bg--
zZ|M~kJY$xEA}pQCk8Q=Y$-6;KJe*>7gKI1!ZMfkkgyok^2Oh+!ZHT)ujBX*19NYwW
z(^skIA0FRidw{FVZaCApl?H0G1nYxB%ER9Kb+Bpsx}e>WhOd%jX9<B^am6x&8n$6;
zp!yNN&BDT9%rSzx`MooD-lxSwV60ET*prPNg#w!2c^n&8r5OFsH)tw~$`wh*>5%e`
zm-y>Fd>z0ERO;ec(<|vhw>|-beDpNjlaw7YB;45tRn+ZmxsL|I>|y6k-lVR>FfA5s
zwhLq26&r+5Q|YcQ*D;UlAQV4h=~0lw<Ij>|p49TEy%2%DhtGwf%h){5#h$F`127qO
ztHEJMz(4u}EnGk3prDVN8Q;bz!VddO8o|W<xDG=;$d&0d^vsMf*=NDRAL3|uD^&U?
zn2g;a0o;H+ga{hi>T&xVsL~P2U)6W91be^T^)U7E>`kf(aptOU2yfVn5|>FI%@L;%
zmV#m*jH`E$*uzwIkOt{%vR_78I8~VOTX(Kqhs8bKd1y>vaV+A!O3)^(nLq5`kqr~@
zV~*Mv3|h~_AqiA?+>FZ~;S~@Ds3@DujJXbi=Np$MOP$u@J+p*W=!}NUJDa%p)lM$i
zwwuO5&}OV>-*bZrbVPf=S*Wt1U@G!-zKh|-3o|N?ak;jU^U?o&6s$s@T0FSzNR%;u
zy@_d7d=h4AJGGnfn`>Ncnn(qk^iT^28#yRwI+G_&V@zQZwmE|{KJvH;^?HQ><RX7a
zfxMY_#boQF;m{NZns|7MM>Yil>u2X(=cBRJ-NY5?Av~zIzp&ln8cpv~Sz+-}3GLl}
zJygP}ken^f$_Q~)z{0NI!o}7>*Rc&$kHM^k#kIJ>_#a2^!`3y1JlFbFB10U5!tX5T
zqYX%oZ#Qr&x?YP^BYiP~Qtb>f=7N8qF7kJ&_?x#^`2I*x>J69J?c;VGa)=Wne|_Yd
zOBklf>(=WnxaG17cF?i0w3kbwVb+05lY*fod;d}T?xWJP{N+qAFxQDu82Hx`_UI(n
z9k7JCaLJo%9-49CLn?RLiM`1{E>GMlov}BDkCKp_0s1&C%^~3XFsMVAs+IfiJ>jom
zkQVLd+_J5_v-qpe$y3~Hi>|eD=XJ*3-MuYO`7_>;Mu5L(Aa3*Taeern09eL6z+bnR
z!NM4c-gCv6-k+u_T~8?&C<FD;J_)h70-DX-#@y_OSRMk4jzwnjdsAwA=5ZCHIKV^3
z{AS$^bZz(~FFO6Z?Z(k<G<FlTpQP%40xj==Z#$tE-XA?^Zbx$gS0Oqsz3}G|38|5p
zJ@5tT<FuvmD`3%2&oKYz@y;SF;HLRD9yuRKqsO_*!}FOJ=2s(31nl6$z7TscV<vc2
z4y{=S>#H_u2iAkF(1sQGzQ?IOK2(r$4w|cpOTDsBKuwQRhZePoc#UyRA2kH@fG-x@
z+^>Kw^)>rtDlNFDbLqw{&#7`V6>K>}b?YEa#geoK8=i~%QLGTG{U(GzNAO^9@Rd*q
z&&PP99Bbtdq1WrZD-f+3xR9E~H4;|PSfD*`fVOT6mi*tS)nW^eyHF>ZIFEsD1sgBi
z4Y0`KFW7z>1lQI=78dS=Vb*DeK^^7}V*<q~--1Ur!X#Ye1#9|pjXP5|84DAHE0Roj
z_7XRyDo|92ONE5|f<AIgkumISG&#5*<J^cyE`E7{#y9Xcce}fuoM)5y0AiryB#k=*
z;vf(1jC8;YgFF9H)APeTnYH&?l+{j{9Me>^36rV_l5*|}cb<+2KuWu6H!>4%gk@hl
zfm(4GmV?Jm@YK5tTpj_vUu&20z!X3#xon*cjIN^VzNSVzEevP=0LpZ6%h}gd?dL@u
zn<0R^F^mJ}Y=a!Ksq;E~10dg8@W~dgiv;6u)G?Nrw;}pHwZsbQ{rU5o_gbae*)q0Q
zXYoEATEF9Z+W80J)Z(wecb8saV)Sl4XpB*w1o}RV^oJPgi`s#?SAP!c&qQ8~Q$oJh
z($r&mzn&W69jNoNgj1^&F6!f?d93=l0w3Tx{cOMBPio9~0LG{ThmS@?a9tvIg)6L%
zaNbk;3`&m$r_itk^g%uiC0x$|p5o20_anjP3()-C=dqOQkO(8TOb1?Lr(Z4MHiQ2t
z5Qpj`zo_IQo9kF}HEzG+N?nOOZ52rM|3oT}<1Ju5_97N}J_Vb4O-8n`W{N`jTM*W>
z4NtMwoPeIKqFlU?vc5qBLZMc3i5aBWmy3~H<%8Fhb-ZZzSSB^FUed>WpGs)T728W#
z78L#SjPqUG)L5vzbqMD6R4Q=;^(sxc0&5g%`_~Q2v1Sh=jNcoouvSo^<*TDKx*?V7
zj&jcv!QEf(QT-*%3A{D#;z36zU5~{?x~z{j8pdJ01%?cJ;5-5~(g@*D<>Tc*A8kRj
zFAx{CN5vWxU`DCo=z$BF4zcU5q+#jYf9b)&&?6+&S26xIIY74IJ4y2osP<ee25gwi
z?@`6etGXTw0WG_OiWVqHd~vFj$|E;ZT{PA1q0-I!LE1W3uq56P)IlPBv7>&~5t?xY
zj(R8pY<z&ZOJUh1P`)=Tpb4Ie$Gq>p6oge%6Xe;L$N8Lxrq<TB*pR7-(QY#unMngJ
z`<!Z#Pi}Y(^7WtrbM8>;{a}?M&b@?*ISH0qgp8_sc!e7A?Wz}Nc;xoFmr*{@Rf~aC
zbL$n4y@Yrkb_)&X@qKB&*eEHd=|?vY*ueCgv%)OokpsG3`vaTo)G8r{F6li$4fiAY
zt$#2qBv7`S8?iFPPRtM&WMiHV@$?C7nbgqxfHVtQ%%sj_^sa^TU-F0W3mBDtwQ~T!
zfhA78?({({RNLs&qu3NIRohKz;Fu1#80_l;sV=}{iKg~m#k3se4_P#p`Y6ZL7VW7p
z2~2}gjqg0xQU6{~Dvdn^@mA&raCq%`hvv><`_1_*&)wmV_o>H{r|bjoqTL<zarXu=
zNO3TaKYn&b!fGOk%P<)wu8Kqq=ZsNCQWAGupz{4x`-E#vv2}k08p;VnCbqB#IPVO1
zT4Wx3=znGxh*Aoco3f;~G@1!K$z56WWhb`_@v;q4yzwN=JbbD02+v*MqErblMi{b0
zoONCb3a+*1&vw*(<BacPc!{VFpjQRSA&~OWSADirwcRA_wGl+VE$o6XuL*A%sXgT>
z8q|Q{Z{JF}Hc9JW!F0cTp9-U1QmJQr`fi7abP1vC{f1LIOF^*-R2iU}SgQBB66Ag{
zhDwe6(35@-8r+Xn!_*OI_zE6^6WCSAi}&>Y5!bnbqr0#aqKQ4|U}+5XL>+`ZH6sj?
zLlz<205-433xIt*kYlblvf)c;g>_eOmlm99#^@1!coZE;!+71t)lZyvGBzk7h=zFF
z{(!UiKv@y2)Qa8w9;Qh#&zvyj31Dc0Q6k=)O>I5*km`l`@h|s<*q_<NJy(?87@giE
zJ<PZCF)N?NVm0!Pr-Co+wC2_XuFvN7JN_^B0L4r?7oItI@+D?%`;8oceFIGORKJx9
zHk{>AbbEn1uddTjc_Oy_XgD&2$`4b8iH2+TfZv;Rd*{0Ods`pvD;hs&6ajC;8J=;1
z0Sc#rZQz@{M_iz!g3u&r$o%9NR332d3RP;THi616KjpUT^!eCXjKo)%4CQHeEL2?N
zk4>#v%>7}1%fnoF1@95wa`|ChcZJZpqmLJ@<epRJ>%D2>CWI1R2n8rlJm7^uyxE!B
zw4R6TFulg`rMrzj7jqma?|SrQ%P35)(gR^)BWc&>^-v+rsfVf7Hw>h20y#dP!=6Fs
z-t}6Pl8JR!A0^(E-=}tZcSN%l&ORrPD{oMp(5;h6JQ~9jcPJnF90pJg7n)+<<-@fY
zu1*bf;&(LT2!A>E5c+TtewUE%)l}d_jbXyvZ`y420P1sw`z5rPK0;dD)5pt=&(6a+
z0BiOTkf)0pXv)e4(x(^$*&BQd`zMF@gVCPtr4JAJYZ~=$!XmKb{*Ij&sf3}#Tfl;K
zN3FMKJmvRKvDrNQ^y=*^RIy>xQ|*gv?V9YqK1%mgNYM6Ocyst7d?Q|E*FYcif<OCc
z3gD*Un+cWp!RVwFm8EU*hRtg#zM2Q0PVbA{eH^PI?!5{dxWHsTl!@(4v4;qq{|;8d
z%sF44htTepx~o(D^63K61o0MS{NYYdk5k(po^iz>jlBlF3UjQV^VG8=SnJP1V1$CL
z2D2a1+k0n~wh8;W2>WRJBWd~~bk%2Sx^ybs2A>YZ+M)fn!kH@{ZR3V}5<;z5e5l`7
zA<`-D0ri=0zxz_G8S|()6LxZCF!i3`IiA1hW_FSLj?(8y>M>mpvjA_svNuvS7N3KD
z>$v#-xg=lErU^TaN4Ufd#41RN=6-x|YfFfh!dk32`Lz@jSSI2<l|9}&!y=)#8?Qqe
z!ViUq(|FpBOB2qZQEk}u=z$_^=jQu7=)(&yK+{<ou)!uk@5O`Ja7@7*`Dn9=dM?If
zslq|t;dmHZ6T6K!bFS9H(HeI0F%5xV5d^@caE&+#8C03Y<953|uY*bZA?(^Pl|HLY
zcbY>6!K9^Jo=(N3{H(1rH$=j$f`C#{WehcKafKmNoRXfn<2=DL;r!KDHDuQpUn;Pd
z2(2~OBbPcX26o~ql^+d^vH)+j@xWCpHN8`x#eD6jPj~|!0)>zFLH0Cp0UIjwkii{W
zpE**ENi;HYH9VC<WBBy#J#a%s^g~pGv#jPks8~Y_r>NhD#)9-PKi0=gZ!h0;%;Nq8
z%%axwm;znf@7=)gy?e9;mWz<gJ+L&I(dB6>z`IV(Ziv!h7?$5Q^N<$|f+8M0LQ@;@
z44;YIrnU#E;J)cj_<EuSilmA2d@MOvhh8#rqV)0KN_`4!ll0k?YJ)c3KKT$^T3eEi
z`d&>xN>iA}@TS}VyY{n~S0lIWo$!%J8rXo{0qlxr10|#r_nwBR`3x5@DqNot$@$ql
zcb`94J8E}oAs>M~S|2G42kyCapNjVJCs(WodZFLOA5h-ELJCq3*rh}4Qx(yWt&pLE
zd${A6)m2*f2gnwA<~28k!ulIcpausRk@NeVv%oj?N!XQG<kIcN+x%m7{S~q~Mjvw`
zxN;#9A9M`miaX|4so*xxSn!N9cKyb&BFc4xe42uogbP{#jUT7R`;T{EMb$-(FfPAA
zzJ1*jC?;1#Qc{8oRKy^)1z}Crd%`YqgFlbs1GW-oS@Xy0<2HtU-^6ukZZY&e=>>O%
z8Q;ZTGk>**Y%hestc&wKdydPn^m;2CN<Vw0`f&@KR_|i-vi=-R!PlL7Rn&KIk2lrB
zrN$VRL^C87mILWDYWT)UsgIc6VqM97TX@uhC$Dfv0Ds!dE!Sw|9JlR+pzm@yu#H>Y
zt)b&eUh(2NNSMASH0*`;4yrK8dmbD}1OJbl0cVfx*`j*5m+I{=!zTR}hy~5t9D^>$
z>0^GuAO3~Vnmdo>RU6(V9LEsBXYx4Kyt$m4K!av6+xU$WmMTruWybBX+we7NyO7;j
zzfp5a*b_%C+71VntZh!1{UaXcT+Zd$(1^WY$@&nQF~IypAM*|;uCTm|1s`5Q6C6OV
zVA^~=!yk@h*;|H0XP8IuuvlG`9e1YE=MUWCd^X@~<=E59d=2Zd8<MmiPJY35Xc;_8
zLNV~8gxxkUcFOk-eewEeVZiR8=KEBaL?hS#z4to#6$Yv3gah<wrvtUTg6aO13qpA6
z0978j&7Xs*?l}#G@mIJ3QF+E)YJr_OoBa?y|AbjN9T08|3)*5UX5#dBz8ETfyy!|P
z7Qa1Qqk4Qco!?{~4={nY!WVjHZy>MctT&Y@<FWf}4FT4iMtFzk1p)CI?3EV3FU*CD
zjA4NlX#8MtHmE$-`BB~a%xCwHli`MNf<8`%>r{RU;;Ahy(46|io(a>~?l$%aZ^ygy
zAQzqDa@bKknUO{V-jKwj`?&i$)n3J!4>+2G<s0s0uYc%FB`+a8tM@@{wbA0G?Qz`y
zoH`9g_NhKv=$wMHl=t{1K1aLZ4irV_RSZnea|KuJhp|-znO*MinwtF_xYF*F#VIiO
z$E1v;y;SoQtFFT1&-W;{q&iZ4xKL^5uEE>Q+$t6AsU+d_{x|&hDIiu|;tTrX!}j9p
zBoeC(QeE9Q?f2l62~Gk?N^F(Y{}$6Hpxs8@m)w6u7Ew_Cu>dFMm_*g@4AYMQ{x5ze
zXeIxe0T<%D1P+S)_t{clk$(rl1t$}5E`oB!L#6IV5!872e+BdZ;-UK=-!*9->V6V}
z$Kb+_LH_Ua*8g}i{6A=*SRIHUUqxC_U;cRuDzTR~ApU1k|F5wk#_MnROM$->_)CGm
z6!=SlzZCdOfxi^^OM$->_)CGm6!=SlzZCdOfxi^^OM$->_-|7H&MsPzoSUdkn4i|D
zW1LjUnzX3IM5jdE?}$5vtCdPWdp8$*jq15FCGLd1hkaC{>Pd1+9A4y!zzgu`(EBHz
zJw_C}n|R$Pg~#|S?#14$`~|isq}S;p6(b>naJLWz#fGkYfJk^k$iT!gMUjrvT}^Qn
zADa?@h4+BS6A0uFaEVv@B2T#5%OAxOufRn)yh@Xv<6<O;A<Bt92_HETk`To+yq__G
zkEgb^DY%Nt0)g~RR9uZx5yeA+L3}SpwBQ+{cqUr64^d!KBZ_BuAE5nes(>Qiq>6_;
zq8kX#O~x&P`RR0bH$M>kXQW3NQByQhP*M<0P+Sm2G(n=OaF!MR!NObj8lpH6CMfHI
zD9*Twv!>z*rZ^2L4kaEzJc@V>@i^iML`Os?MA2N)M?n!mDM3|1Er}4q4NMGyAci2j
zpp@|35rj}73U@6*dNIU;5NU{l?4tjo=Z_J^Pz$n0AqpahVU9)=K77KjPI$S!M9f0W
zM5F@zd4-sR_#E*CBISucx#Ca0_){eQyuqJr#Mk1c5P#AUuOf;wTjC6rKcX|D`09-V
zVlZMLVhG|z#8AW_#B+$}5icNKLA-=0xY7bqjH?)DiCY4Y5Qh%8BMPS2g($wGv=4DV
z;#S0Mh|&jjrElnpk|K&)JR<s!U@z#0@GE8zq>n=sB=kfSC`4bjAQ~fz_KC3*W2irV
zrnp~^Xo4tDW5G!4bWt#p$QS*R%ZjvIR=x_@a$3F$-jM4EUJ*Paa0@!gWdvCSdGMM{
zB99<|HKI%&0Yi|?9`Rqqy@<Amdk|$9^3@K{>=DIy96%I(7yNx3@etxcL<huUh({5R
zARa~(Jno7p#@`Xq2~p&`Ad0zh6H&|?Z$vRiWPE>mJqZ|MkbMwuAci4cN4$o36;bfj
zWkkVSg6}RO3O*D(c>(b}qToZpkAg=75d9Ga9}9jygDB>BETU*{3}Q556yjsVNW=(4
zF`dM;64UEG;yuK>h<6ZgBi=#`M~p{&il{<NKosL}{TcosP|0g;!u<3DH?K$OK2R23
zOA^b~P+5Qh;P$Ea1I%W|5w1@7=ehLCPXDKxk?Zc{;e;VXMG@g8zx<=l$n|vc7P<0^
z{G}fXm$E#ZTwNtr0_)?4AD4b6-NZP;#mU791E5TcSE@CTVN1W#jyx|XS9kobq;PW)
z9m84rrJsqHYWg_2`2eFbNy=S-S9aSv96cM~!FkAcb8>TY@-Rxt61l*u|1<O^#x71?
zPJ*d2fmDPI%Rg9eV(jDO>LYShniMJj@$wJbn;5&I#bPL<qE%_xsY;{pdo++p)yEao
zqs<h&Dfkrgn{v}}VhabusW;19<Z6+MQI~Raa3ZrYB#etQc8FY0sYzmdF^56nLHZVn
z@6(>g@$W8??=9tv4oUe2zeKNvF6NFTX0-r1v*80;2Tw|#G827vadJhsel&#E#(rmj
z9t8Utda#Fw!2dJ!zsi@p_p|ykaekJcqbuME`S_0@57(bOb|Qz%{iwmj7`bjv=>5-H
zk9>)GYlFTr<({XAX%4~Vg(FG>!RKCcp0zP|%kBLEt63QzlWJfRuhUJdcnuCDE%K}-
zT(GT;BS3P|t^3H9CJ$G7T8H0*6Z6CddC1(1s0P_PB$jz^&duJNld_yK12CIK<)4uR
z`4T(+j3mgH81l#5id=~ue$44|-5*X8WJ*(r`q6AHqxJ~@SfxYMkzf;J!G?m5e@<+H
zI76YM2(|6ysZg9n&BZBvsn>2!KA=S*3T%}4q|U-OdF%E|7m<mXEqItJkZC0{_v~Ls
zpTg*~2V;hzVXlIU5;TsQ_$YT6)ByoFDdPe6+&(dVB%xqLEvPI_-$U-)x|KfNp)PAw
zc5!kS?bhf#?6zOzuG^cb8ecU=rl*#cX-TQlfFBRC&alm)rflu3ba#h%YglS<V~hpP
zQsA?y(KI2kwu_LhKOSY>Apdlf#k@d|e>!bl(P{uU;<vC5q<KYE&iARpfhN!RnixZd
zdW+xojZ$hHg{vXXY7OAuJDh`P38E?!9Q;Q;i5h8|M3vBO7w8jCHS|-v4IJmE0YWFY
ze!%~dsPz_9TO0pRz&4WFurcK{oc8;}pbO?eIcN}H^zLUI{)@gxp!2>U9*x6flE7QT
zw|bbn_%lvOEJ(~I$<$w{sHfS|m>b2x@wybTmFI3yqr`67u>qTI6U;~>@wPY+H4nFF
z7WrZ(U~IM_Uqa7y{{9X&sqiZvMJ^`q&-u+;JGW&gL>y_z#aW&L>fVcU0hK~oNV5bk
z;s7oUduW`-=KceG;18g6+EJCPl3LECIaHkrGH_dl#E<TRrIL`Hq|&6ppT7l%`KH3~
z;f8LP<!*7k+^=i!QLoB`A9MXKPt-@JMZ=E)tyZQ@+_Jlk>ARQ%9SOZHP0<YG?i57v
zvc{()e8Z^cUkS8!{=P#l>{H6MaK#y+PMYCDj|eGW0!<yrmq_-E-r!6r7aL*BzUctK
z7~v5VfPkUg%?GI1d=n1kjHO9kleEZ>I}Y&xBM6o90UGrw*C&+Ac*u>Kc4wIw!#tAI
zEB%}s-M}tJYMn#c^+bF=-IBTj#597SmojB{M5_a5f-*P2W&SJjzy&~Ul$xSRi_vI|
z9!BAeRT+P|Y(~P+@qr-ra5sw6C^MAFY505&+^_R(aN1-D2g6zwg5K_^ZIq})T?Fy9
z_G%a&Wt-RWi1m^N(nM*qZ*Wf_m2IJ__$<B5wCu!m)ILkCQSKIg0G_*?nFiBMsfKL8
z=cMj~e|ruX2S9asY88Yg{YjJ>pfN@jF*wDuxE=?7hn8#qQ1IY)0Kgj3q}YapG?Y0s
zovy)mkGR$i(80ih73`2JsSq2gJCO}W#5aJre;K;$^BH-~?%;kLHLat!AQ|Z41T&4A
zP+Q^x4VB_+s5m6rb?_zfAyfs2Q<n_=IF1i>sJZGXjXjj|B{rc6te>_DM!=kI#o@OW
z8gQd;z6$6Hv<_mE-^e9YGy^Al57Q*>*2`{?v*G(+hu>hJwzD`hGpARgf2l+s4szhw
z_%@qh{;F4CUM}Eu8Rw#>AepLhuxPn}g_VwwE`jthlD?W+nk;ATlQVH(qc10chs+Lf
z^>VhyQg(@gzQjjvIhnPb87ya-ZSgx!)5bXJQ@)&gAGugPf;1%?u2cO5oN49!<?LiR
zdnZo0&AQ&eCvBC>xs)$wb3W|;U<c@5f4%IwrR*juh`S(8C4S^Qy#oDmfn!^bxEtf3
z5)O3fWiQCt(Kxuf(bCSEr<QZMTF#{pr})c(1e_`QNF{m&y5#~~g3-#gdr6(Q=!0H?
zSrm}O6SeLUr-1QsteE@D`IcLB=GqNEt~_qLcMJ6_XC9I>H`|=UftOYq%HZPV>{D_!
z4O~z?QlC9c&C8i#a%SRV9PL;Hw^!jC0n7PMkZ&Um3eNah?Zx3q?z^^}?FE4=O(wqx
z6MSf?hKJ4glh75~70=2N28t>O^|2cOMK))>W9uX6sYb$>L$pbzVg$0WVSwyOwCxLz
zneXPZL(BQkk#8sRp}uj1ddMFqeX4k2xd1hy1eUy%YnsN@v563a<6vePK7Ew|(D)(6
zBNrT7FWR=8&2@6N50AXKW?_XB*;RVklgQpLHSaCG#{srx`g&jsKD4p~-)5JznE{rb
zj-bXI51be)<HhCtlgO7^$K3&k%y=^B92YL<-bJpx0PlU627TOc<h1g_a&8uK4@$_Z
zDbMy^P6kgNS<a<q8FaWLXYi?7e3&9{w_b@^l)!(=J-Z<+P7TRl8oQj!ACQIp8p_@K
z{F*U$;Zr2|fyAYb@d&b|k+H<F|0>rLJde{q^UDP`VgE|1P9<vbo$nSZxwv^b+jO7K
z)PBKVo^S<~;1fRk1>aze0JaE~vvG3mEj47*rv$E6LEHMkD8_>HhZ;cG3DD)RQhusW
zJ)PXVppSo2U0zvngKk@7C*=zD2Z?X6zpk~{oz7=LjU`7V-b_=glArk5yDo2?_!w@T
z7^P`qBsegs=B%kp4dbWMI$D+NEHW%HKpm%fXzw$cP18UxW+>H<_=7#=-H5xy)Bg54
zZAOJYp7`=~mIW__$PEjnb_uZ9V#Mx^>(4fCjQqvUgT^^JJy}Y`s8y+H{;5%F>_|AP
zl^LoGWqev%s^+9K?w-IzQ)fFRrzRz6oKn<JoW+ibv!irz)NYbEr9FA>uZc-lE1jcN
zsqSuiX<z^3l;}jIGcq%j8r@BNlqO!O$yDO!6w>089aG&ry;i_yP!5O<?j5z85~2kw
zfH1%d5ZVSfN(<E0Rn@j1RkaCJ73T9FV#TszbyfcpD>flkS5@0Z{l2cE!OLUli<)9A
zeV2(Ok7|@=CGOBGB|xAsAPAfM7qnVN?WRNRwkjg)b_h^MmqINUtQdhV!rk8Q3|6OD
zFRZ{D(k30MOIN9~*Du<dsX_5fjiYv(PZW+$kBd{P)6-Om>1lC}K8_i9u@)PZwi2CE
zvH&C{3;e4a7yqbObyQqjwzD!><CvVHP6CYQO35ZyCaKaOUEEjbg+Vz$Hn?|`wnTo>
zs~_VRp!C}V0*OtbU%(PuuBa(4=%CnV6%}Gql9Hm5V;z-Q5Gfknol+f#^-M~pqgolO
zQY&N9G)c;&6m>R)jxs7~g&t!o1Ss%V5&`b+0z-<%5f1=og?dY6P<tsYjk}uwA>^=D
zq^|%&TR`+s+N=S6v3vasn&@{6HT7@N18n{LqP}iZ2~~9|iL$?v2Hz7G)xUovhSU=q
z73lm5U0xb4Kv+s+3ScR}x|+ISU#_lu0Q6#;WM%By{e9HcCLFa}`=XLgl_jUdD#c!>
zi(|AhEy~Grg`Vh5Uu+%y0<p9y1-#M~>h9O_SG&`ND%ymjeye_gOs_AfFS_KY-9Hoc
z(vxFVF)Bw%VXx3T{bB&3e=Dfwi`lp`seh^~AxT$P^*^<i*s=KqRs-t;4cD3>(%hDq
zBQ+}KY19)XHW5>jSLnWeF+kD3b%fy3i7wfyaTErKV_M2nW%AOabampg48XbD6_9-|
z2E?YBB!^b0qmM`Grq;f41X_&GuSKg`2v&8BG;5@xj8!UAF_qGk$+3``s#J+Wj$(?6
z-JBI#@`KU<{e!1q`(L+9-pvCDIl0`%6<UF<GE~7|ly?6mbLw>#we;@5D8IIEmT12`
z?11uvr;@^yM$$hOhOaXsD4HC%LO%=(0<2sI)YA!fZ4m)S^~6b;oB@@Wniv(M)Kyic
zWu~Z~I>kge#;FpO+G36|@lmRk_+7skkm}!pp!g^CB2}G|mg1NQwYx$OmJ0#U@{Kf#
z9>1Kv5}6FUAc$c21hBBPC15>KqvLVuiH>nAv8Y}t0MomJR27@0t7_GM5`2kJ`lBF`
z>sJt;W~1)YY?Sr><xKyvG)JR<B}VmSHqhzcLPUsdUeRwMq{Qp;l|_^&0T|)}rWP;K
zMcoMTe%i^!{^d(p$Pz#oc!I^JC1dG|9b&PS>!?hMR>sCEV^`o9c|!*X<fO#yzmkEv
zjc8QUrC>*34_Y$XQ&ll3F)4{D*z!$DSH~zFlcI!3TcH&O<p9&*UgF+gDfwlbqDe~)
zSmp>pYwb?36TY0WLR++j07aW{ls1ax(bDbBuTWiA0`+w%h&8bhEFk#HnNg}N$JEpq
zFOQ`siK^uEEXOEy(sxg?^vY<`lVR=ZKgm=jySo`YiH%a@p}`|9XjK#z^)i0U>1a)?
zK`AXLgHkZ`)hVi2gI3AU^ji@Juf*@cE%isQ{n}?g>bJ(-*ClJIbt$RJ<nQ5Wn+N*n
z!PE8C@S)s5!>4i+4WDWol$er|{6r6|_NiK>iOKknr%9?*jhBZqaRmlg>V_7MTmuYY
z_&|>@Ru4_rswH5$`#*xE>-Q2Y-TiOEGN3GA8QlLSEQ7HFEQ9;sgr(0M0HuHXn~?Na
z4}kRUeiM!!TLX^X-EYFtvXL}p07vickKjntaS4ue_eXG~nY9E*y8BHyGUq_F0FK_>
zZ^DteMhi#p?hoOhFKCYp2Y0^-XIZiW%<|1|LRl8sfU<n^n^3geBBVT^EZ_Vllw~CY
zD9bm$2}Mf^?I<m!e-p-%N<>4J(!U8qO9pK_mePL&L(3^z7{AXaOGW|uvXs_qM=WfA
zZ0IICYrO|nP#W3-0HaL^OIvJV2n$5})<7w?=9K=ra=KFbBJ#T|J?4i>G_0l_I&{~|
z!!b#d5%XR45;c(hBsqPB+@y3(;&<74cpa749R4nQ3ATVN^5f%vT0%hoVG+F<`P1gh
zYW8OxNKMI9s)aq5sQhUsC4v2e+~xW6UE7!E#82A=ru_lLWhNG#UV%@(LugoUnSBhO
z>v7YcfCdSF1R0P22(TfMmLVHH|0B@L1G5Zv`Pm<V)KYSZ6PDqAm$3%m-?8HND89>H
z10uaS@jVi~tUp5V-IQDg|6Rr(0oRgRi05UIu#DkHxobqTB%6MOWhwKIkbEy6moa>w
z^GD#9h#^EfM9DJv?{n4+{s&?~t5Vh`7?#!2a@(~`w4C!tZP%l(WUlITsKMhu0<5ov
zm)orWbd_NBH9Y`nRX+AZ#4f9D?@4w}SV4|2I~M@P^3C$=GGUO&UwHe)&Wc<NkmMwG
zSHw0T+Ot9%bVdtY9Z(a0rA-~Y?-IShoz$uosxOt%)t1UjFgzs8I`Ek*Z3_KpBM=+v
t*uebNwyOTt9~z*43nIdMMe4^2&0lIBYD;gd;V>iZ&T0Kb{^$KQ|35aS-^c&}

literal 40649
zcmeHw30zHI^zW^%N;F93C`CnX^DJr9M4AlI;8r)?rkn0<A|!<fg$&7%S(!7FGNuwr
zlE@T7C8Wss);{;{?x`Osect>0-+T7&x9oHF+UvXbUVH7~>~n5QakzRoJ51f18KTYz
ziIDdW4-o^R1+jbr{g^=vnjb4Hn9iX^Xov|?D3mQ4>W`UPy3NzwD?U?pNl!Sn!7ke;
zz}QYv(B5j{YVjz;t&j-<lN(V)+r_cGdn<q51;v*U%tF@@Ohzz@mjH=VD6!GJIEWX|
zg19%NCkRj|y&!hw-P669oaj&ng~?_`vLTK52Vzzf`~%@VRJ%Xqe-1IiWwU~OP&y|#
z6h#yjDuvP$r89yf7-1Ajy%4vY05*;7PuHeY!Mzml%OFPiu0bpTF@xjJ2!jG~c^q~S
z(;M*U3^pwQ(uW9h>q!;i;<ay&4CF;PLZBklE?mA3?>@{2)s8|@hC=%SKbRT93}&)9
z5E1FJkdE}=2>B6zt{4}u5n_aA3QV|+eFXpXgSZs#QT}UiKM>+TR3QlM>lwl1Fi^T5
zn-ReX;jlwlY>p3`P5B7QL;8CI{~+8DdI*c@OQA5>-r>w3U&={P(f~-`4l&aAa$d~g
z#h$#_7GhK%U5G&kvBP<>I4^FI<fgyi#U;ErhZpbX#p@wP?Ox1_LwT_WFSdtR8u*61
zSeY01<HbU}IE)n(6dp>U%p63a42AsTA(n+$fp^~%V)#F{7L+y&;(UllK)e@Xd59Bv
z>3$F^!o4NL3J{NkSPo)gUi=D_gX9Y!M(GD3M&+!5cnHJ^5F`D9ZVCN{hA}uCCbCHG
z{S+t+*<btq+QdswDVrf>6TCJ1M_Pu^g~t9DcSR?zA9%c3RPR!*>;sW6aR+m}>gQW+
z+r8k`eH*9zi2nDCMvqx=!(?N4q5J2d`=Z2L-Lit#s8aXNx;RYG?PMWCglaqg^eW?`
z?3Rp{9kWdQSBpK=7Y~k7-<w#^P@=0?zs(%|lqIYASk*!%LUzIC==(kr2NvJ-KHksv
z#Z>V%BdSMgKa$=yZMV!1GqZdzojtFIC<twH+OG4WT*5F!x}Uvm#?WTFt$A^FQZt7K
zCg{(s!{trbR+YJ9z`4&AwY4`lS6QT_sLxobbYh%gPnPkuH%@8i+PA0wm@?`~ksg9F
zn-9Nul5xQfmtSsbsx%;U_0fjBhl}?NtxcYMuq3kn-XPsgRqIYVEqSR?uwYGSw&>Fc
zJ3sb-88hi`kNC^}JU^}U{>LeDQ}c2ch|O7|yeXgZBu89vsaWsk4;%^2lILfx=y)2~
z6~5KlJUB{ErXteNY40$<%7V&AU!6C#ucr!?DP`UBOVsJ)xB&@ompyW)-z=<~D7r}X
z?#{51z+>+0M4KPScaMnink+X$;ZpeuDaCOXks^(!N;0H|UQu~HIcV07pXDiM%(5mh
zJ;KH-r;87;e7*HhM831hZB^N!efDl0qH*@lYo8g{n`7l-c1>Bkc5Izuz{<2`-u=Jz
zl$4Wc9xbh+EwG|G<H2IT37YFGFJ7X&wN`p9XgcuA7ft6vJ;rqr#|;x+1z!^FW1@Om
zVBL*n)C2xreT<@Z>|YS@R&->khIyLTDPc)Ly@@wYW~@0r_u`9`eSHU5f2vu}`H(Tb
z`5g0%@je~l;Fpgc<!;H+$}w~ddSD~D+bm^S*k<LbWwzpy&q_4Md@K4kbnaw<7cqBZ
z$BQ`mg)J-ab$yhZvGn7j!D|-Dx-E!kQqfmFXrT4=_Jo&aBSo7mEU$d_+_XR^z3}IX
z)!)-kDfW3|@NV8@38|$e*(0eQ8m|)i3Uf+*gKjnoI;FjRYr4JU$BCbHAH)V1jyxyR
zU)p2!rZ^K%!SiJeqK~(Yj2)wz!0>+Pso{J~^~}mb;ZZsE8jC*8tD9ZCOuAN~eShsN
z+8Dhppp*&AOSb|nGem$nfT9KC72U&pM^PtI|NIY@Ukh0+Kpsn=d7(1_%a=puu^^Ay
z)1xzjygXRG4+vs;Sjsvbu>3@jH{_KMOKhhDmQMzGeV#mO1I&G$3Rpg$RQ}(zp8`O7
zyz)`Ooz(#@-wZBL|D(F2&{cUh$kRX`mUpy{bvoekPs7C+kVk^34mcg%cNB0w5!mNU
z<kcV99&SfRvWSP}T|nL(<gxC%DxV7Sn!NfW9l%=IsesFG0eKj6u~=w0zW76o<u$=*
zjd<-xA~^jI;;s3@L0*T~e%J!Grgv6;KggT%>JLrnvi@I#JhER@9@1ZD18%1rbet`(
z{jgQ)^7!)yd89v7HmZMT11^6D$XoKtM`>Lh|1Uva7v#~ngMAeR)X}Mc%hv)yJpTHC
zWLNFi3*_<qkNUr}0hgZw@@W2TUvF;)hpq+<qg5*!-#u`rwmmMEzX9?FAP;+S3MwDe
z(W!vtr9j9G<N*flvpa1=Sl$Wb@%n-KAE*DR5;$KHNgk*DNfz;O-h7ZZhWcZfuC`wi
z3>UQ@ue)9C{}_-*;~%O)%>A7m1GxT2K;D$6f7FJqmj4dq(fp6(5GLGqD&X>sV8A(m
zJihO0`O82a+27xmp9k`4Jb9$MuGar8$eVyXYBaLp&Ia7xG0^d-{n&20+JDPH9+i*q
zkZf1=_Y>sJc<o2BXb$U4!1cF-hka!KSpV(kk>6oq`6VEa*MF23r~gh4r{jE8AU__;
zN9Ce4JdSX_-^Ey7wl{_1Oltq{^0!aN@*yCP?H`T7uJ&IV$fNx$nm<w9F>d?v+TY`R
zcR(KXAF@3-=je36@^aAO8X%AC59zS0_CF8g(fC7nsO?>Ce>TXY{WmHXmEYCp7Xdij
zG6#8Ve<<!u!0opHd2GL(l|}eiekI7G`4_hx*+6FkmcIe=sQpL=J-^|30Nr;KuzWF`
zXE}63eh6$jCU!%9b~oj-yD9&(oAUPX&|}w)_NRipO*iDLx+y;rHcj0rKe(InC%P&B
zy_@n@FzI!t{VTgEe;4E@bff==z@YDrJiVLpd%G!L5Av4XXul>beBF@`@232jZpwe}
zro1DZ$LoUr(Aqb!qY1wq56}Nm@X}yrH{|btyjwTq#|-XX`Ab3Gr5oi}g1kpJ<n@OT
zZxLhvR{z^Up5Be}1>nG`JN5Sj`Ptnl|0>8cx*<OrUS@f9Lq4&a@~=R?JNmPNmu20l
z|1Oa4&iMHb^3%GZKUa9khtEII6w%pdP&|KZ26?pqMf*-9+tvMh4alSYKi>a#RbEb>
zfBud3-(7wF^aOd_ek88|)R}<WzeApTehAArdY_Hnm3Jy&`G@k{=P#r`n2Nf*e;+!W
z`}}~Oe-K7z+YecZXEh{;o*Q8sNj#IFcA$V_qz_o0iF8<Ih=5|$N2so-4KVzPd@ziN
zd+1IAR~cdy$T51hQ3FD__&M!wVuYgygvtloAp$u@_=dc6a*WcAfKdI%0iikD3<w1j
zBfdEhN{2la5ju)derq6v2hRXRec)M$2q;GUN!;6C#)uEkUPM4KvY#2;TOvldFgFq5
zS25}f4v+tTV~lKW39tVD6JyjrOM#G`B?6)Q<v=K)7~P|Q#?ZfUM5yB5IO3|||LC~<
ze`6f|ko<uM6i<iUjsQ=9!o?*+Q-wGF5VsA!EuLq1lVxG@v?U=uucmm|`DHhyhrAh~
z-jwa;F~$BkrQw;!rw^4z3O%Pu7~YFF+BU^(-@8w->Q#r&l5o-7fCIJ9OQA14mrX7n
zn4%!W@?W228}L9cf2ZToVXl3nwk=Oq=rtoX?6HQlTxgi&l2?26pO-#4;wAk?#rw@(
z#_6%FeljFnH232`EjQU^I`ymB;J5Y9R#qi9SY2*<bZ+~_hi>~+9xUtO784g5H!ij|
zRcUhk6M=r$Qlpe7SftQ2?i{F}?`v1z@O(fl2^XzxI8Ys?F0~4M6u&t|SI%XAa<RoK
zKMm{X$Q~ML%43+HM7`n<=#Abh8n(0V_Zg{+tA03^OqKOWFddm&6d#|rtj6H_ND?mE
zd*DE&nkl{6CVJ+@LWcfrwQV0S$QtOh+-XkOR#Sg|Ys*&0*q!4adp-No9Qh`_uE&Jl
zV?(Xp%uW0lRkd`>N6RT`3Vm;paD_1_#MJk385=GxT~<1N_oP(KOu0bcSBJ9;EJfd$
z9x*#R%2&Cx<*UbsDM!wYXn1FT)_ZxZ`nWND9<E6$i(7au$+7s<fYBsew1>cf`p)*M
zw_Uv1{pY7=P1!Z3FkeBVx4gah^iMlRew5nX<6uL*k=D;)dn(IJ_Wr!$zG;`%)1T4%
z1p>#;GPb*LEM%CtH3=8312|AMu4H--GwIcwx^HEQ+3w7c4+g4(4d?WTD)BWiooSK&
z@Y0&?8JjPwNmW>`58CVJa(w^9T*EU|r``3V_bBH(?|(?bh1W!_fx7Pfr9kDV(uADG
z4?1T*50>2>XV;?b6H>I&`@t#gJ+(>6pM!6|oa?jl6mz%3nZ(rRs#Al%n0?><Y5RH_
zh5l`z4+$6T{cxbZpZeJCeNpgvwa3+S4YDM(l07CT4lvfZ*mTMF{;|1fav|&ijPwWj
zcN?-Y62&$?o|G?sY_-YV;_963b<H=n7>p<3!fTb*Kn<AuP1Q1Z&%kjPIJT+TTbK)-
z>I;ra`Y6~{_)Pap<`n2lo(>dre5L(%#kB=b^|x#aoLB47Ja2B~^Lam}u1d7HOu|Kb
zBpj%p_C&~kf9XvR_Y_Z;V9AyDShDba*_0Z`hb{{9Jx2^_yj?w9vc}!wgcNl_ze%Sy
z{5)qgDJA8dTd#FtZx_Eh9KV)?OWrqA2QdXM7_9L<JXY##h2d~%r=_XGZ~2`+_V`D5
z<+{sjUX-gYO3T@~HZ|p>scW%OPxt9Z#)y2Bx-Zk>=-`r9ZZbHYgiC%Npi&%nD8KwX
zVTIQ9irMq;T0dPE-eT;nUgf>My85zN&tuMNPflFZzB4_OmR+OXH*@LmUMt+?%Ek=+
z)<6E4V6*mR6%sD`)&U3VN~y&4%L=z_sSJPMVx(`A86RS7zUchjhl7@9k3CS%SSS{g
zF)H29aOG$Dk>Xd9vJSm788UM4$D*2nBiuFyC2kl(!iCoXt$`{@ce)eqefX%&xmDA8
ztUSI+@5lFb>qRDwSvlD!yhnXwl+b}ygHD-PW#@h<x*%Wi$a#ve!PS)k5*Ji;?}gI)
zO)Dkg!ZCYmpoYrUY>~7}N?TWZB>H||!P?}cfMX|XC-v_i@qEs{BQp-ZFZx0EcV-+L
zsJzEWUDJ8N&`+P69t%s)AJTZ_%q|^WGA>&CaG=_UYiP&pd>QC!V|}Z7%AmBzA8khW
zqg%_ApBa$*#PRtu<1MA1k9_H=)30Zr{@XasEvqyO23N}$R*0S7Ay;#?ScFtxI96^A
z)L0Mq2}hqQYI`{>HNNBX?T$r^Nor|JZ4oW9ZfcrF=Bysp4~~SWkDJl-<bJPFMvS8O
zg5O?k?OSGNb*^sRTEXd6BwYM_g}|wc>I?>7eRiW_XohU&X5|}l@=Bi9EY|K{nli>s
zW=wf=?~x&Q)>p?aKC$GmcHM=BfW_ZuoZPULlYB9{Lg!n|vx5C3T=blV1J%f~sQLA_
z&ygp^H_2q>e^aiMYf$)Nxn|jerBjA$_~|yArJc?xUgCIQznZ5)MnPlgxgysk;btP!
zmwVJUY;}#ANy3HW$<{!n>5R9TZIW&E@b=6zZael`Pf#cn_T7AMdH-)~T0RE3<ldj|
z6jNw6@ztks{v*Ba)y+6ky=hndMS<!44rJBs%edP>!o|;%2%P$0Ds4;Ztm<!t$s;6J
z9jSht7k)VLc8;F?j#Ocd<GN8zdc|JV+m$c(nisijm!g|&^(wta>(^T-%N0_dIO$n4
z){$_@XQ0&AoE|xx=fO=9zR#A+ADvM4uCU_v?d&Dlqk2ZX9yicF`RIP{Cm#DAjN6v+
zOxQCyJ!Rh%<2@}81Wwld<UFzbc3}_+cTlSqpuVvuE{(bq{=;Of;P7Wg=96kGY&TuI
zHZe>`!TV)!Tz;X=tf(qQ*MO0M4^xw-YxlGrd30UxnOpr^?w%Vy?~~=o%Qr~4=s6k(
z>X1ig`tR9&<YAV{KI*{gdFe^v^BPm7%C;{v*S@ZHDq(l;vyHR!mWd`68CDK*5#8r2
z9CJ>{Cq-{sMbqXBtLVG#l5pWzu{BVWE`81RF}-m${pUw%<*BFkob_$Uml$__LQYsv
zuh6sBJ4>~etK580GDSeUu|lbb#`L?2Hv%#ZPdte_U2-eD)*1bl0q>*G^F9vLociVC
z_x@ChxJ7l}7es$M_ejoSr|E+iUi~oN-pyG2%5d$-9di4PWy~&5i?#RLB(XSk@TKy>
zr3VIG^8GSm!jb&dBwTcUfCE+2@w^muR9(@Pu;7oQ=;Hki`g|@>TKJ@?@os^Gnau{V
z;Vv89dz@+VGtaEs?XrUYavuA}K#%XY^;XpQJ(_w<fm~lW9%~KMjHM$FRP^z`<$R={
zz?75Uw71`xcGB8P>rmXAM;VKh=d5zCO}V*Aw!v=1mB(u4^qmzWB(F`id+Ki2<92aH
z;GG-WNcBbM95_&$++H8q`L6E6*5X+nbLXjSdLZo{w0VnY+|>(x?^b!;aoD32#7SEu
zUg*29Nb*eID?=43wwzc!hsk+<hHlkcBQKYPD~~}Trk*)y|K!yGn*l-RoF=o5MxI#o
zalq=qZzi%b9*v^ePVl+CDk{0B>D`}4N}3ghD`m@$UB5>`NUTcsdSlr%n$eBL<|N$V
zWZaBtyT(rwJH9$@;@Et*MXsSmL8{+%F3qAY9KAf|p6LXZ+C8Bu2`YoVju=NKs3}Yl
zebHdBY@)=HoA<}X`xQ@ieMQ1mAmg?a*4wXB>^C?;EMDpS?2wBI@xoE#nl``e|9-(7
zbKgOW!qpwUT=LHPZF8yr!7}cxrDjFFwz=@L&=f!Vy6VauPf56nWZa^NYjHbvR#7~z
zoGe<CxO>MS^&^t<9MN^=u8WjH=?fMv8b8n=_~yyTD-U}FM_<gkyg=lrn22ncjl18s
zBL%{eV@bFp$hglaF<TCfonO;$s>QO`Lk#8}|DyB!`HB=J_2Xp`&+A^FJJsiYX+c4~
z>sHly1@gX<NAgS6qE>FaH}!EyZ`#eLI-^LqBgwcA#>83|E=sPdI6ELQAbiwxHP@<>
zEj#i)<lT<3*0{UQSZL^3S<2=|nfsc~-CE;nQk~-ax$vuqSZe0}fY_@!+M*;}B{J?v
zuMcN?M4Zz-v+Rm<VzQP*+0ZXB!Ol^S)T#!D-V6OyAf|Tit(E(tM*AR3i}(iBYg2`H
zCu~j84c#LZEu$iRbRG#;nT$K;>3jK?tLY^#Mh-DIdv|@xX0OX%w`t$Xnrd>oNm``1
zAoqNbl+~QzSJiLirySVKxT2M9BYiiecE!N5qhgYQc?Kk06*BJKv+f_0;#wNwbX;f5
z9QF0>eS<6X)5c9&^Y=?{lt_48R3>5cvL@f_`j+d84Ph(~s)X6ABIn9+qn0wgZ^}iV
zl_lYhBIB})-?-(RJWv)QVl~)RLFY{0;t9EfZco`Owc<|o>1VP%R}D9NKz(KC*)(t0
z%{gC=?fuAMt{!|y{6vNA+)pur32YMXXfkf*_-*fm@~r06YTE<`?5<$39vHf&KKWW;
z?&9`M*QZ46rA~3?{n{Njtq)Bpkg7ShV5V(~Tt>r+*M_%m_B`v6YDL0TCF5Qw&Hc1w
z*R?A*66(j#IrZAQR?DPTJFIT_{^<o-A#tZ~KauhO(iqUE2YdDNOU&GWuSzAxTGzMk
z@*aJmaji(t=JO=nF=Sl&<=gKMDqc(&7p!k9ky@>#cJa_4)pACi=ZT&djFKvSh2ku|
zZ(YC26j&9$<9*y%*ZHD@;t%AGjTHW#q!hd7=ok_%jg0$IHsjcph?u4z-Ho3c3fDOu
z+qN;s<W#c7G?mgN4>$RqUS6M^CwSCq!SXOegOAz{p?w2wO;jn*M$~MHj__XGK(+^T
z-irfu$-~|6b@!@VDEwxb5ICd%ebeYv$M>f_Eq2|k7gk$(&bwGWdVO+I@tBa6)kcDj
z-x;Di=gd_9w7fyd{<6BmqAj(g`r>nAgiW<K8OQKlD6H*}RliQvZJa^5>(s-;3ZLwJ
zJYg35YM|u<t5glQh`P5quUsQHDVlob_7Buo$hqQupy2SBaVLh<*^_YLTc*}P^&0$z
zDq~K0u5j&(@>t~)pL#Z4H&}N<KQr~ts<d&JQeM8;5%DloJi^$0<e9G}5odk82JhW8
zVQZp;rGezR><!*dBwWo_RH(1MTJmfkpL4<Lmt9ka)>k#_hP)A6@U;GiMq$9YvjKfm
z?z|4{Yd3A!*Qd!F54n1%D;0YQ*)FOzUAW=k_nlLUmg|vlwaB>rGq0bJ5Sr{_Uahfi
zUtSbDc93CTvEc`m8k_pKD$T2VxzJ3}Q+ewUmzlLPlm`+;M`9K~jI%9S6Ofi7BA<Ri
zIFW~o&Iq*0xHW@?<}ci)eyUQyuriLZdiT-bOL8R*C-!I*X3F}WeNJmQKXl-*(>*>9
zoxW+y=jxdgS8w|AXp-JNM)CG^?ezm5TfiX+-aqKH!OeUxcIevo$cs;G2V|AVZ%c{G
zeAWNFrVsPoO$X_r8bd_yZOk;;6@A##W&Y@<S<S<wR?ert+@PUU@utDp=fV;iNe}Q2
zyfsjr!jncz-@NF&xq&6>Tqfx?pu(m0K%`Q=^Re6yPvhT>=zZa|!`@zvy=4_kM4i>M
zrP$}LABkTy<f>!iGa-w?UZx~m^qv6+>LkgCjeXabdK5+FAESBhxuyPmM()Uz$4~c0
zuU&k8pz8U8B~P4B(vzsc0^v{Y+kbM)cUDW;{Y7|EcyYn(2Wh%OBwT$A3NbZ$1aptR
z<nSyf`MeV@yOm>q4(NB|n`VzCW)H$OH5}{8W7C>0FKC<-(v<pm=&@JImQL5>?DF!(
zg=Z!#S@d2&rILh;--94{YMS^><Ev2#F-I<?30<qd9e!wz=q`mX@7DSqmj1Zy${riD
zikL#rL()1^Egx6geF~d?^;SPk<^5k=*4e0+xb9h$Mf4l-yVBP8DFp8DeK$8YEFW?3
zRES9g$91J>k@j7oCW)O!Y{9bJ@DmojkFHhdADv&hAk464LxW8~ysGF859e1YSECCB
z%3tIZgb?-R;-dF4I8ZAhr}fUCKk$0r!P({M>9;BbOLv%!zx~!aL05hHlFL;~eKaQb
zy!0_Hy=2iAou?{vfln@*mKqg2+aY>wa;b38;@%`&c&FDIsHY>FgnfiRWT+QcHVW?3
zvssjThjsd!=Ec{C?O003-N(M`Z63E>eUO#6;fT$};WqVQ@)P<=Og=q`DfQy^>SvC2
z1TK|0<A-;ht%3S{L!W#Z3&%D0-le|&DI(B|_FC%xrC6EyL5~uTu98R_P<DLLqpH;7
zn+{MKo?8f9?seeOAm(>{Jy!*<+t;qk4S&YNg~36*kHWxIw*7`CN0}ZHRTDLKX4MV7
z?)@q+KiK;HzFrK&;fdqx##xuG7c9*hCYgJpTv4*p*IvKOUG$23lG;Gc0`nU|(%*Qv
zXdIhhP>89LVjK3SA6lEN;5Pe{(}!o4TG3MaQJ)Nq#y*ry-TK_IsM)Pg;FGYOp4(bZ
zU2?nUI>f9=y7p5a(TpSBLnaBVHcsK;#sb!OGVW{}iAj%IrZ&APJXCYcEAp0b#vC1o
zxU9DV*MyvN4;%ed8`jr(RNrGRk4~)VWiB}_=lSy0qeh6yoilSNm~B7L^c{gq5tRo_
zQ!=h>_N2>?=N$VMUg>1!BG&hvmHAQC5*^(+dK>T8XV-jM_27fx<l?90*YDE%U)y@H
zSh;HK5QD`m%kP^D^rFY?&ECny6+~wmW@Ox(wT9WUZ>Y(iqGLiYXonZ{>@zo6d~u&~
z>xI@=?x;!2RY<<_dWp>w&w6X?Np?@1XMGszD`lFuPk)ekdF_PIs4@aK7X99ucwdO2
zspWm{e%~t5r%_{9_W46wFI|{!CJ|NFEA-&;)a&sBFFY4*Ib1D}t2Qi7;$6>Exj$qM
zOn%PlEpRToI9TwD{rymCHGxYda4j$>#MH!Nhc_!}kIwV-%8yaZ7Wu9%H(mGQfXJE8
zb{b`?pF3DT&`arp>a<~-=PEZv4V_Gv8d!8Xq4%bQ{a+_NH}*8!&)rv3MA6x#B^g)w
zk+06crkfLPc$VCiW6cykom?n1ATz<yP?+*L{=l8@yQ{`8Tw|iQQ+&^!y;+vB7D~~(
z95vF+##hDME%$16>&exxD6(HGGOpyw)mB#GL#0CHWZzHt5>t~|?Z~m3>AJ;~TIfDM
zW1sj4f%Q%C`*+;&*-!7EzQOXne9%unMtSPVS*v?&^jF%2ev>DF-nv0NfsA`zv7uPy
zwecK<^6Q%-X6CKG{NB^EJm`fhRZ8Vz;Pz+7tXYR8@-H0<a9mKnR6NJTYt@e<d#t|f
z{2aggNbSPI+F=B)C?ys!(R*7QsJ4u&0~}Y}?>9+vchF}k^9{Yc7^;`!N`HQ1)orH8
zzD%(DZt=DvFz5J^lvuZoGR|3P;-%L`OApw3l^7(J|CF0W;1bgV`j)UYP-iSVsTp%G
z>1wYzmo&7(9<ETbeiph+D>YfL*x+NvFs;UP9m?HnD`lwhlJiF$6$xEGs%CbaM4j6P
z`}3!?ch}R@c=bhgX4{Gi_Tc^bp2UuleeYh}e^6qer<5l9`M{^tQ7c~We!p>);)F;g
zU!h5y>P-8YBkWXGR_5nFP>MTac7A56rElKh8*VFO9C-Cb^Mf53_h>^wl9%{;@6;iQ
z;tF-^HNBeeuitt=E|aNpMziwXiAO1S3e>X$;`C#WH~SyjV{fti!P5^qRtwyPWn^Tp
zuJd8>aAN_}o{W1!WZ~VKf^*uJ-W*BDvoAVvJ@-n9`=m|Eo92ysH&rNxZjkrWeBG4!
zA7Yjt*0OVL9L_PUzH!>aMRCBBuTqrH?-Y5srhtjwtK&eeeWkc@hF6iPwadEKd+w_(
zy>;kv^xi|C$F!8KHseU-M(zq(UUezvj_ZWanPKW?*9K(m(_TBGCS#k2FKh8L;Vka^
z0E#HmFMdytu&L!IhdQJmz7usU)MJ6LY;CciIj1((s62D1ZldqV!^^c6S3H__&a&QB
zef-yHl{5BAHp`n&f4wtpu*j#S){V2~j3eqRNQ`6rexJY%YHp!K1P517NK#0heOGv2
zkjgtNwWpWf)LtC-Tq1I`yXm+=>k3vhOJ;D+m>a!_Ixz5AQ+(f+O-8S_oL2cd;c6&>
z8%w;8o{Ru_fqGZ|iQFFNafY)N)4~)FFAlNEZj^m3o6=9vX(6RJSoTBR!gXtRumUet
zTzMuPJ0Q5n{P5z5GvetniMsybgOxnP30&@bGDi#wF}3&WtaZK{^d?zo+xjF@Ojf+n
zIjHdTq{@kvZ$sl8j;~(rQLcIC#S+@_Ax()sd+8&twZH;0Qo^PpcEc^_;k66Ic>RX-
z>qN%AJpF?D8H&eR4{xu-{lAsYRv&mbv-EAFyrR0s*{cP`gKrfsZ5bo{u_7a@>|L|8
zrTd1Q=fhsx$dBt?Gl`*J`zn}(>rBRF%<-+e*=#*wpv<Fyd(qFPna&$>t0nx(xDiG%
zDbJi2<Xp?xut9pU$oTg!F6?p;VkZUVdmZ+&Nm5=Z$SxaF^?-Y>K@p7wO!%g-HBjeg
zkDz!yKGULhyJ}0{br(G@GZxzgms|>&mt<x+DVd!buzX_RQ^)P2$3H!_D~U5rf_c@n
z`D{Xisig9iNk3QIQ0CPajW^d;RM4;2;hM2xJ@k*eR2E$^V~m;k!~A=5?;daE(vRhc
z_cX9wptQ)*d!i)0Hc`=Oal;_-9zP$3DyJGceSENCyXDGVLwj&>iS=y?88=O1#*S4g
zY-#3hBUOg;`u^!Bg5UdmoEE2%mc8zZ@T#?weM&>k)oz}l&B)ibS!;X4YhUSNwM|Y^
z_aBH$&D`GdhQQ^nZ&S&*_vcs{Z`q(>lkj5WNvQ#!m*%{(p0aR*vclp1bB3;U|HR4c
z{rLKf{sV27eN(LZRz6CmUqs-oz^{h`9i~oNDbhQTd#=pg_e~??YKJ~~HYoJ$JlCEN
z%j))3>_1ucS^CYxwG<<%<(xkHlP(Kut}jg1w|%Kv9RAAT*kwV5L~&V{1Gz_TK09M+
z_-*+fqQ2aDeL5NUTDJQ@U7fN0+{bDD{IIYzeu2!~<~b^h&&sCQmDESOuW%dBkPT3{
z8o5cq>$L6G?VIh#x(>Laa_ZSss`_n@z7=i+E_WQmH+rpsy1Z)9%>82hrpH{Hd@((;
zGTD0Hn6KY#wM1=SkJ|3FSGY&g(?`)g*QD2*2<6YP3^uIT{LND3x>nicVDC4S_kC=*
zdf?*X?|l$B_4e)t`E|Vw5?Aze%x<o2;Cz-V2pCtDk-xWS$HN)*20!K<`;s-_%PZRv
z(R++mR1WC9*VZSB>H6%j|BvH?1;q2X>kxO{oY`7iz_rLIh&*X@^>$RF+#p@Yo0-?1
zRz3+69w_$ib8_tG(B!1?vg#FHDH&PJWS`A_G*g$d!h`x-?aQ!o@D0nVGh8fB=z+U0
zaVO(`bVwRyPM3J{wAMi?Y*pC!?{BunoU=JmaaR8LgtTu>JEiB))(Lu_$YVFNj>QJL
zm9F%5|NgUxZMD1X!q`lMV-_S_{CyLlhZPfDXi;whwrsWfy1Sp7bNWq>KF9B7*5p3?
ze0p)vIriYXhL<_wZ}n~^yjMBdv!ObpZ<zeGPq($t-Da<RaM3_AkB1uzV4mdqP6-W6
zZ`v-LYSDA5k-JAxpWso34&kFtE=|Z6xx8Ka&AX}cYRBhhiQK!L7hS!1!z<(T?b>(Z
zdqj9$ivBv|jtq_aJP)H7@V&^m%e89kXCw@NtQo)WPUFl)!)g=vGF_{O9=m!-_P9&^
zkm}J=4i|cD_}Q}Xf!+P10UMR_&MT-#W!wusnp@CMps%SfufAx0m_^17TJgi{p{HH(
zG%<(8Q(Gij)EgVW$7(%pURSuqEL(Ncj9LAgUTnITv+>jr^YnVp;NSp_QwK9F_P-h<
z=Otp=IPf@u%bnNhWL$y72jv_spVh(hUN{L%S1SG_zcN0j+|O{lWuc?S3aMM$^+HXm
zOtPlO-?Ka^xO*RS|A-;_8>n9dUZ;QCkQ9EDe17Il#$BQ>UGDF7PUW;4OLRuXt@^W<
z4+N*5)(crXy5gYHp^P~z7Y~(g&>uG5p)x$NXTe3)n(Yow+LMcqDeqXevZ|#ii&S49
zGVY2%3mtKZ=gV)5y)ZBJOO;u2dZl);xLf@++uW~7!<XroGqtv9RP?Fu{ZU6`$*q=>
zeV$4+KjPH&ZdJE<537pkOTzUf<Bs;#v>DiM`lz{9s#^~^U-NVBKmVcc=sgEZV<y#l
zWu8kqp<?(^&7=D5_sJVB?)A^9{O+)15W_`jSWCdxlPN<j-;;0|WZaMoj)l}K>L1^U
ztVlM@ovTVaxq4CN{h+CGKP581A79{L9nf3Mb9H61+=|3y(hK#Eiapwua&_=j*5=WQ
zFB;q@livsUk#Ua}T&^i&&Ke(nElSl)@LKZZv(87$#_KO<_|ubQ(jIblUDCVFUTBiV
z@gMG_aaADYGTUVJMy8IC$@(6Vb7rWfk?K2}jH~3BG`1##V=k8IG-vG&xv8--Q~kB;
zua3Pj{%KPLCrf|d(cv#0Qyq#&sOQ9Qbc^ocuD&eh*qY&=p61&3^LsOQ7YWy&jJxCV
z_(ii^jQ5F5yZYSWb<JFAi9*rUH%Uqh(@m6a%Z-&7d%rR`B6#S!z|U7y=MJIv38igt
zt!=QJ{4q#ODl{iK8F2rFUuytnc?$X)Vd!sop;!%FbTSaKyuTwB2$K$c0}|Uw(SO-M
z17!WN;Gr769r>mHP(J+k(bUv^f|y<Dr2o$}jdX?orke~X6uuw&rB1t}7E}iQdvFWk
z!yNcO7Nz084R;X|{&$qyojU)M?VbjRG06Mdd1mm@34Avd+dag8(!bIGm6wQPbc=M4
z|32V7_+SFg0b)`5|FrOLoAE!bNB-64p9cPE;GYKmY2cp*{%PQ!2L5T_p9cPE;GYKm
zY2cp*{%PQ!2L5T_p9cOy1Iu`CCzF_5VKHNNn2#DWgw3G`1*u_QLTavI42FrkmWDi=
zIhVom8zZkHPY+_w4q^Fme=YsvCpRFFSwQIg5dZ!i-=nin{2OyRTnq7N3B)M)0wFnc
zUW&gdM(>a@7W%FqU8DEEC>92SS4~7f?`|<Z{yj8m5lTaO;PnX+0^wgI+Y<<VgM~s6
z5Xy&IvJ?n~5FnHf-hEJ(0in9GfKVF3Uk-%whXSFr(>(RXpaLL>8Ha0YnShplqc0?b
z%0#%Ry+~K6JxD)D9}Yn1dp1WP^qm;`b{pwL9Y_NR{dNNV9s&J!0Db4L45R`y3TQNt
zD$p1p8W3tfsw-*>syosJYCCEx2Pgt45-18N8fY%iJfIk$`9KSR76QcrEdq)IS_~8q
zlmLXjQ$gR9pzl9s0nvfHfqZ~`flwc!KAR2X4}??|0EAS9bRP_a^o{h3^f?D83<$lw
zMc)FU?{?5PHj{wH0io}=#{;PWjQ~QwL!AI*24oFn17r(i4rBpj31kH{5y%b**-=j*
z2_Q+JWk9At=yw7NK#D-{T7>x9f#^5H(m;KImIEaMp?v#z{al7B522&}Mmj*cKsrHX
z!?8bskIF}OfcgdX52_RDD-j@M4^$u_AVDBBy6`<pLxk)Fr=e@!5JZ1|y7l^3c&Pu8
z9iaT^x6G)GGC-(}sI7f~kY13DpnSuCh5`)%8VrQ=JrD?&CkxkzkJ^K5QyvJ}sR|I%
z=WrlJAY~vWppif$fJOnKJ{$uy8b}og@o7NFz8!&(u91x+TgT=7F4l)U=(j0oJWK|1
z075o{`rRJL4#*bB1_;^71R!J=mOvIj=0M1XOo5PnAv;6%hQ=Kle?~xtKy)BfpIJc2
z=8^4t03jPiwu)@l4QM*hG$8c7;1nQNAQvEKASWOOkUx+g&}<;2U(pcw2ZB-zHx@IV
zsio%;ZUA$yUh7<<*>>(lIib&Ev7a1K0Z&ae_)oj_O%wK{0~~EN_>XIENCx}b0gkSk
zF5=)f&e#hQ!O>BJjzBg7eH@GZHVF!NYN%--j|SMI8ukbU96cysO^5gn1Jw`voFZfl
z)U*s*y~bg$SHRIy)6!Ja(Z~MZu>UGS0S#yl8adcw8}>LQC}5zbX`lv1hkeLlA6S9{
znou=l4cN;Z_L9Ztp#A{+j>Ueo5Jwk*5j~4I*y9%B=n$MB;KKNc#Xh+ZM~~n{^ElYc
z6(4O0kAwYsq4q$Fpdjc0qUUfg$OxE>gBpO|{#y?8DN%+0mh*cKZqI*Z7Vuc;ci}Mq
z#A2Uff&#$NM7{mr8UTlAYM0SqVE(?Pb6LKi1sL^u)wR}^3optD=)r`C>eRlsr36M3
zIVqIwkONt-Kl{+nFOpeI839kwjT%fEnZQ9ddRte<)$QR&5v)<IU)TZYjWCh_+5vC~
zLzo5SwY3A_5WVnU?EvRD+QDepk0JKMCMbaV8qIXr(;@cM24eww0r7sVH&g}e%@BKI
zBV@Et&tZRu*k2oPV3;5y#U2r{M>lRcT2K!5nTUP9AsZzIR;yP<>=h1VG++*aYGc2L
z*l!zf&>RBgU=NMh!yMP*wPAL_J{z&mIl|sF(BS^B4tOf~g(bpl@Y^w`P0SS7*B<s2
zCMck#rKXSOD(vMEd#Mu?P!i_$GWG+B{n&vFh@knPy(JR!jy}@C-&i8{D~bK$fev7{
zG=zHpcVuaTs?p4ceP8}}9{q1iJ)x<FP;Vhv{E_uy|9YsmiJ6CjJxF2?c!V7gD#bo2
zu}?jsPl^8V;h|yge82&F|BW5!bH@Vy(hGa?Lvtcwq7>`{6Z`N3G#HCW+1N`a_VPzq
zh&JM2KbqJNAivhw(<b&52r|$bR5$Ef6Z-}P88CN*hP`QGZ-4|E!NGnvvEM*~L!e;~
zo!G-5PexOdVhR%$nvt;2L88`ZVEv*!Ft*XsNWp$Rv0p+#0Ugi|S{Ht4AzILP6zua7
z`|N`nfDq!~FSf8ZLf}BZK~WU!2Ne4;1dajBVmjKbo<gywLn_i6u|Ei6fa6dIX~w0m
z3dM!q$+XVBY&w_YTw}hxPl|sOiNlEw3J0n2Tj|;oCJA|b4hQ_LB7d0g<7WKat0-Uw
z^R+H_9%YBmriTIGnPQ)^+#c5t@nzcEwT!@ISa|z_j=do!X64i2?CS=1wEzvw3Uyrw
zaM0>JVEvfrs)OI(@X_!$ZrEod=m716U}eHyQL$G?;Gh){dI9@Q#eN?N4$-IB11a{P
zNQK!FDgd(!_ECy`Eb=+niz)WP$md``r`S&;pMyQ2Vo#2I4)&FbeLeCy*qbW$2Fd3v
zhD|-vANCi?=U|Vl*rOz$gMGGQpObtJ_UekgQt~<2?<@9O$>(6tu-LODpM!nJV&9j1
z4)!jKy<_q@*#9i{pULN754G2M9ya+L?86rO;N)|#ms{+mlh46^aIqgxJ_mcs#hyO-
zob6IvU%J>AD4&D9?P71Cd=B;_i~V4t*^;}5#U6dzjZ;_{un$`7Lz9Z;5Mqadz0_ha
zn|u!TV~hQ8@;TVkE%wyO=U`vB*q0}tgT3WqZ=ZY)_NR;ef$}-n<1Y3X%I9F8yf1k^
zMfn`;wHJFG<#VuKU+kBZ&%vI5vFB1g2m1!bzD@ZY>^&HJKjm|<e_`w&mCwN*h_MG%
zK4*$N@mwE^eV3v>g*h2!|4lp&_Ff7rh5_uJVES)A^P$xYmLFI+f3Ys;s%h!LV)9=Z
zyjKq8JxLyEeWD~E@NxHWYF1G&MTgLXTsUFOkl7~kn*4`NgVu*l4%-JU36#9AGP}dp
zCCebk<0wt?(N5#HL+703uwjx1Puw1N<QF;n*?(RQ2OVghjyKS~ArK|;;|Z?@np+7N
zbY=q2wVJ<utTjd6!P)BI)10{Q31fzGOhf5m;5A%5j1j?%VEA)5p=={{xSI|BwZo#-
zLPCQB*lMh>+3LtCxjK!AXuRi2HO}n0rfi?^Fb4Qc57pKpT^pK)u)Kp9>cEU(uz5HB
zbhbZ(9m#+{rpWORp@nMc>UB_Hn|x4mn|m7fi4WDF1Bf=|0tB}LG~!{TbE4e(gDCd`
zMA07Y52f-|VhEB*HHILF9;iqPqq95GHdZC%LJ$xEf+p-jJ6*IEuV}4KHJA5L4>G(f
zkV5-MD9%$`-eeqXnqdRjfvSn~0uU!+zHkQ;k!;8w$)<5n98gK#;eLLMuy77DD4gR*
zGoVGliMTJF(~+K7Q2@e<0&_qcVZb0fwKIa592kGvrgYyhx}RUPIwOQl3t@!?1K`|_
z;C?LtMYI7xU4aVhxG&n&1VJDl^0;O+;z{)n2<RCF1W^PHM29>`z=staOb_v;F`{6I
zvUzumFhBSJofSz7WB4+|7(N_!Fe8{177c@#K@aYr7vRYfihzHK5v8q-N?@^RkN`Lx
z$hGDH>DFta--&KOQyF(E>;QwC0kx3$p%CaqpWBHY$dH=_lH^-x0X#<kL8rXuauDTR
z5t{wekZ!MB5O1G^j6(O11GKff0HO67h5$T|{h{J*yPYr2vjZp|J*57@Bsu=@-~h7R
z^F<o>Oc4q3mWB|PF9Xd}8Z@qN)ebs>G<@N~{SO5bCjn3{5ySEUrvQJD;GQag2={_U
zuGJqzNo@gn)Fc}BtOCh}hxjsmm^5Mm?4WhzYyd*OMOrh2(b%!7f4lU+Oxiio|1Le~
z5&RFOwpq=g;@oAO7~8r?Hk9rYNT1E11+ZBm9W<Yu4N%CpH13fH0>irm8d}e399AGB
zq%|o#EQmh`z;DtzD6IW7fX+IId8h*!aydjZx%(9w6bt?H=gBz?O{`%)#Hb;<(wD&q
zg`vb@g!sao$qXelL_>oPX6+7Z|KHpI{R<2*`#;wUAISg&zU0@j1AV|D4~W3OkX!qY
z#z|@{NRjTqQ2xBXY1KWy+X3Ylsl*~j^rUGhY$VlzK%*gk9kiouMu5dKpgrE?&dq{w
zn6uRwArY|Tg$B`m7(7u1Cz2HwsOCeb`7whS+-x)-e>$@xdnaduLdmzFQ238%MQ9j{
z!=eSjvfDum_?ZBRe?#=5&L4*_p{BM?KuTzv0$6Z7g0N<XLBsvRgJ^yonJ6h2fRXNC
zRz>GyovPJ;72c{SaxW0-`V(ScG{^=<Ltgzqj`Uy8=1}QBO{3&F8;T>}!W4my_)xpi
zB!%A4bv#9(9Dso$Flu;theSQmn=&;G`Is2kAQN~R@CAoICj_4OeAsMsu18}8doz4}
z8NMAD3_gnhe0)h5`=6#j-YF!A@vdMS0f&(KsA{21AC?a*hy^Estne@&1}&J5X0#4!
z(Iy{Ywz(&a_fKm)zf4r6)(Y?q0;!sNY^Vl5Des^z+)RMNy`T{Xi@39R$Fd!W^KyVZ
z?+Ru$beM<`Ofi$rjG~2x`snGjrUWrV!lP*Pu;6wnQKUTV@DSMSl2am?A=+APQhe!Q
zkk}@P3(cg%BQh=@hO{@^w@ofCRGVC|&ktiUecRLur;%&n2VWDlhih#Qss7xwU$vX9
zZKx5|T02%KBcweZw|bx-5+1LuZ4+??+os|wwoT<$D2T-hnN5P_riL-uJ`w*(4Q7V2
z^>i3P9T-4sGq^Zd0t}*U0?95qkH)K2D;V$oudsOSZpGr=cM+?NW&x|seHXFX^c`Te
zx$hzt**E}-eA`7NlIZ~u>8^`7B(nw_(p?vExMoBQ8Neak{S^)|99waSyT8IAMpi2h
zao0s0Y&kHs01oM{i#XV7xHzP{zr=yIKz%R{+;tI$KeGZ1|E7y5{3#ny_%~fd;aUrt
z<pG6%(?t~iA_FM=n=YboRl)70*6S`}v@VHIk=E-jVsO>Ktw-ziUtw@9g^SU3J89h%
zKwDa`N%ioBtv{UShNyGj8h5a0a5Df5_X2IS@%b<J5T1cf<`|~De7s!bDY6}lWc;us
zwiV-c9W+-@hZf9^@M*_x)dsL<hlF>)4Gw1qwPTaYr7_@axE;F{8zCdUzu#|jAoO2m
zA&to2Rv$03ziU7!E0Ph0_H#ju-!_t%uz$hjkDqpR=Z}fs)(edG7l?c#Mvd;kPTCdN
zR*r8zZPQ8C^f%C;!e1dn@?QbBRT3Y$ZTeq9^E-wQ%TN0&NUkPZErE~Qj?oS9cBa_A
zh<5C56hs;m?F%8X{;Gg>Ly`~Qj`3IETvem#nLj1)OZXMHo5i%wO}{FomHAhNw4WdO
zCA8=K6@05Q(9{l72_L>ar`zzqm=?H8O7|uP{<6fcJJ*Q#oWH6&Nx#H)mA8hrN&YKf
z@+!=)GdZ=Bu;i5<fVfLOoQLpUtfGwse#PIxJkEbt4;cI#IEUgLlOr7bCy@SdvVyY#
z622r(SN?R?#Q$;tB;or3;!uZsD2ncgT6NHb);v6EB0qtlO&p2x;3GRa6#CUUAe^Ve
t`Q@KZRmmrRPyzBSC=tH)AWnulsD5kpK$>`A4PTWI8smO;@jvh1{{<!=ItBm$

diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json
index 0119f4f34a1..f2c4b8d7809 100644
--- a/substrate/frame/revive/rpc/examples/js/package.json
+++ b/substrate/frame/revive/rpc/examples/js/package.json
@@ -9,15 +9,15 @@
 		"preview": "vite preview"
 	},
 	"dependencies": {
-		"@parity/revive": "^0.0.5",
-		"ethers": "^6.13.4",
+		"@parity/revive": "^0.0.9",
+		"ethers": "^6.13.5",
 		"solc": "^0.8.28",
-		"viem": "^2.21.47"
+		"viem": "^2.22.4"
 	},
 	"devDependencies": {
-		"prettier": "^3.3.3",
-		"@types/bun": "^1.1.13",
-		"typescript": "^5.5.3",
-		"vite": "^5.4.8"
+		"prettier": "^3.4.2",
+		"@types/bun": "^1.1.15",
+		"typescript": "^5.7.2",
+		"vite": "^5.4.11"
 	}
 }
diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts
index a37b850214b..f26f275ec3d 100644
--- a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts
@@ -55,17 +55,14 @@ for (const file of input) {
 	}
 
 	console.log('Compiling with revive...')
-	const reviveOut = await compile(input)
+	const reviveOut = await compile(input, { bin: 'resolc' })
 
 	for (const contracts of Object.values(reviveOut.contracts)) {
 		for (const [name, contract] of Object.entries(contracts)) {
 			console.log(`📜 Add PVM contract ${name}`)
 			const abi = contract.abi
 			const abiName = `${name}Abi`
-			writeFileSync(
-				join(abiDir, `${name}.json`),
-				JSON.stringify(abi, null, 2)
-			)
+			writeFileSync(join(abiDir, `${name}.json`), JSON.stringify(abi, null, 2))
 
 			writeFileSync(
 				join(abiDir, `${name}.ts`),
diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
index 871adeccbc9..86b8ec50bd6 100644
--- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
@@ -1,9 +1,73 @@
-import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts'
+import {
+	jsonRpcErrors,
+	createEnv,
+	getByteCode,
+	killProcessOnPort,
+	waitForHealth,
+	polkadotSdkPath,
+} from './util.ts'
 import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test'
 import { encodeFunctionData, Hex, parseEther } from 'viem'
 import { ErrorsAbi } from '../abi/Errors'
 import { FlipperCallerAbi } from '../abi/FlipperCaller'
 import { FlipperAbi } from '../abi/Flipper'
+import { Subprocess, spawn } from 'bun'
+
+const procs: Subprocess[] = []
+beforeAll(async () => {
+	if (!process.env.USE_LIVE_SERVERS) {
+		procs.push(
+			// Run geth on port 8546
+			await (async () => {
+				killProcessOnPort(8546)
+				const proc = spawn(
+					'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split(
+						' '
+					),
+					{ stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') }
+				)
+
+				await waitForHealth('http://localhost:8546').catch()
+				return proc
+			})(),
+			//Run the substate node
+			(() => {
+				killProcessOnPort(9944)
+				return spawn(
+					[
+						'./target/debug/substrate-node',
+						'--dev',
+						'-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug',
+					],
+					{
+						stdout: Bun.file('/tmp/kitchensink.out.log'),
+						stderr: Bun.file('/tmp/kitchensink.err.log'),
+						cwd: polkadotSdkPath,
+					}
+				)
+			})(),
+			// Run eth-rpc on 8545
+			await (async () => {
+				killProcessOnPort(8545)
+				const proc = spawn(
+					[
+						'./target/debug/eth-rpc',
+						'--dev',
+						'--node-rpc-url=ws://localhost:9944',
+						'-l=rpc-metrics=debug,eth-rpc=debug',
+					],
+					{
+						stdout: Bun.file('/tmp/eth-rpc.out.log'),
+						stderr: Bun.file('/tmp/eth-rpc.err.log'),
+						cwd: polkadotSdkPath,
+					}
+				)
+				await waitForHealth('http://localhost:8545').catch()
+				return proc
+			})()
+		)
+	}
+})
 
 afterEach(() => {
 	jsonRpcErrors.length = 0
diff --git a/substrate/frame/revive/rpc/examples/js/src/lib.ts b/substrate/frame/revive/rpc/examples/js/src/lib.ts
index e1f0e780d95..1470f492e34 100644
--- a/substrate/frame/revive/rpc/examples/js/src/lib.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/lib.ts
@@ -50,7 +50,6 @@ if (geth) {
 	child.unref()
 	await new Promise((resolve) => setTimeout(resolve, 500))
 }
-
 const rpcUrl = proxy
 	? 'http://localhost:8080'
 	: westend
diff --git a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
index 8289ac8b76e..4983a6f3b30 100644
--- a/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/piggy-bank.ts
@@ -4,7 +4,7 @@ import { parseEther } from 'viem'
 
 const hash = await walletClient.deployContract({
 	abi: PiggyBankAbi,
-	bytecode: getByteCode('piggyBank'),
+	bytecode: getByteCode('PiggyBank'),
 })
 const deployReceipt = await walletClient.waitForTransactionReceipt({ hash })
 const contractAddress = deployReceipt.contractAddress
@@ -31,9 +31,7 @@ assert(contractAddress, 'Contract address should be set')
 		value: parseEther('10'),
 	})
 
-	request.nonce = 0
 	const hash = await walletClient.writeContract(request)
-
 	const receipt = await walletClient.waitForTransactionReceipt({ hash })
 	console.log(`Deposit receipt: ${receipt.status}`)
 }
diff --git a/substrate/frame/revive/rpc/examples/js/src/spammer.ts b/substrate/frame/revive/rpc/examples/js/src/spammer.ts
new file mode 100644
index 00000000000..c038afa71f0
--- /dev/null
+++ b/substrate/frame/revive/rpc/examples/js/src/spammer.ts
@@ -0,0 +1,104 @@
+import { spawn } from 'bun'
+import {
+	createEnv,
+	getByteCode,
+	killProcessOnPort,
+	polkadotSdkPath,
+	timeout,
+	wait,
+	waitForHealth,
+} from './util'
+import { FlipperAbi } from '../abi/Flipper'
+
+//Run the substate node
+console.log('🚀 Start kitchensink...')
+killProcessOnPort(9944)
+spawn(
+	[
+		'./target/debug/substrate-node',
+		'--dev',
+		'-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug',
+	],
+	{
+		stdout: Bun.file('/tmp/kitchensink.out.log'),
+		stderr: Bun.file('/tmp/kitchensink.err.log'),
+		cwd: polkadotSdkPath,
+	}
+)
+
+// Run eth-indexer
+console.log('🔍 Start indexer...')
+spawn(
+	[
+		'./target/debug/eth-indexer',
+		'--node-rpc-url=ws://localhost:9944',
+		'-l=eth-rpc=debug',
+		'--database-url ${polkadotSdkPath}/substrate/frame/revive/rpc/tx_hashes.db',
+	],
+	{
+		stdout: Bun.file('/tmp/eth-indexer.out.log'),
+		stderr: Bun.file('/tmp/eth-indexer.err.log'),
+		cwd: polkadotSdkPath,
+	}
+)
+
+// Run eth-rpc on 8545
+console.log('💻 Start eth-rpc...')
+killProcessOnPort(8545)
+spawn(
+	[
+		'./target/debug/eth-rpc',
+		'--dev',
+		'--node-rpc-url=ws://localhost:9944',
+		'-l=rpc-metrics=debug,eth-rpc=debug',
+	],
+	{
+		stdout: Bun.file('/tmp/eth-rpc.out.log'),
+		stderr: Bun.file('/tmp/eth-rpc.err.log'),
+		cwd: polkadotSdkPath,
+	}
+)
+await waitForHealth('http://localhost:8545').catch()
+
+const env = await createEnv('kitchensink')
+const wallet = env.accountWallet
+
+console.log('🚀 Deploy flipper...')
+const hash = await wallet.deployContract({
+	abi: FlipperAbi,
+	bytecode: getByteCode('Flipper'),
+})
+
+const deployReceipt = await wallet.waitForTransactionReceipt({ hash })
+if (!deployReceipt.contractAddress) throw new Error('Contract address should be set')
+const flipperAddr = deployReceipt.contractAddress
+
+let nonce = await wallet.getTransactionCount(wallet.account)
+let callCount = 0
+
+console.log('🔄 Starting nonce:', nonce)
+console.log('🔄 Starting loop...')
+try {
+	while (true) {
+		callCount++
+		console.log(`🔄 Call flip (${callCount})...`)
+		const { request } = await wallet.simulateContract({
+			account: wallet.account,
+			address: flipperAddr,
+			abi: FlipperAbi,
+			functionName: 'flip',
+		})
+
+		console.log(`🔄 Submit flip (call ${callCount}...`)
+
+		await Promise.race([
+			(async () => {
+				const hash = await wallet.writeContract(request)
+				await wallet.waitForTransactionReceipt({ hash })
+			})(),
+			timeout(15_000),
+		])
+	}
+} catch (err) {
+	console.error('Failed with error:', err)
+}
diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts b/substrate/frame/revive/rpc/examples/js/src/util.ts
similarity index 62%
rename from substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts
rename to substrate/frame/revive/rpc/examples/js/src/util.ts
index 3db2453f247..bdc64eea1ef 100644
--- a/substrate/frame/revive/rpc/examples/js/src/geth-diff-setup.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/util.ts
@@ -1,10 +1,10 @@
-import { spawn, spawnSync, Subprocess } from 'bun'
+import { spawnSync } from 'bun'
 import { resolve } from 'path'
 import { readFileSync } from 'fs'
 import { createWalletClient, defineChain, Hex, http, publicActions } from 'viem'
-import { privateKeyToAccount } from 'viem/accounts'
+import { privateKeyToAccount, nonceManager } from 'viem/accounts'
 
-export function getByteCode(name: string, evm: boolean): Hex {
+export function getByteCode(name: string, evm: boolean = false): Hex {
 	const bytecode = evm ? readFileSync(`evm/${name}.bin`) : readFileSync(`pvm/${name}.polkavm`)
 	return `0x${Buffer.from(bytecode).toString('hex')}`
 }
@@ -15,6 +15,8 @@ export type JsonRpcError = {
 	data: Hex
 }
 
+export const polkadotSdkPath = resolve(__dirname, '../../../../../../..')
+
 export function killProcessOnPort(port: number) {
 	// Check which process is using the specified port
 	const result = spawnSync(['lsof', '-ti', `:${port}`])
@@ -76,7 +78,8 @@ export async function createEnv(name: 'geth' | 'kitchensink') {
 
 	const accountWallet = createWalletClient({
 		account: privateKeyToAccount(
-			'0xa872f6cbd25a0e04a08b1e21098017a9e6194d101d75e13111f71410c59cd57f'
+			'0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133',
+			{ nonceManager }
 		),
 		transport,
 		chain,
@@ -85,6 +88,14 @@ export async function createEnv(name: 'geth' | 'kitchensink') {
 	return { serverWallet, accountWallet, evm: name == 'geth' }
 }
 
+export function wait(ms: number) {
+	return new Promise((resolve) => setTimeout(resolve, ms))
+}
+
+export function timeout(ms: number) {
+	return new Promise((_resolve, reject) => setTimeout(() => reject(new Error('timeout hit')), ms))
+}
+
 // wait for http request to return 200
 export function waitForHealth(url: string) {
 	return new Promise<void>((resolve, reject) => {
@@ -120,58 +131,3 @@ export function waitForHealth(url: string) {
 		}, 1000)
 	})
 }
-
-export const procs: Subprocess[] = []
-const polkadotSdkPath = resolve(__dirname, '../../../../../../..')
-if (!process.env.USE_LIVE_SERVERS) {
-	procs.push(
-		// Run geth on port 8546
-		await (async () => {
-			killProcessOnPort(8546)
-			const proc = spawn(
-				'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split(
-					' '
-				),
-				{ stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') }
-			)
-
-			await waitForHealth('http://localhost:8546').catch()
-			return proc
-		})(),
-		//Run the substate node
-		(() => {
-			killProcessOnPort(9944)
-			return spawn(
-				[
-					'./target/debug/substrate-node',
-					'--dev',
-					'-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug',
-				],
-				{
-					stdout: Bun.file('/tmp/kitchensink.out.log'),
-					stderr: Bun.file('/tmp/kitchensink.err.log'),
-					cwd: polkadotSdkPath,
-				}
-			)
-		})(),
-		// Run eth-rpc on 8545
-		await (async () => {
-			killProcessOnPort(8545)
-			const proc = spawn(
-				[
-					'./target/debug/eth-rpc',
-					'--dev',
-					'--node-rpc-url=ws://localhost:9944',
-					'-l=rpc-metrics=debug,eth-rpc=debug',
-				],
-				{
-					stdout: Bun.file('/tmp/eth-rpc.out.log'),
-					stderr: Bun.file('/tmp/eth-rpc.err.log'),
-					cwd: polkadotSdkPath,
-				}
-			)
-			await waitForHealth('http://localhost:8545').catch()
-			return proc
-		})()
-	)
-}
diff --git a/substrate/frame/revive/rpc/examples/westend_local_network.toml b/substrate/frame/revive/rpc/examples/westend_local_network.toml
index 28295db7613..76561be814e 100644
--- a/substrate/frame/revive/rpc/examples/westend_local_network.toml
+++ b/substrate/frame/revive/rpc/examples/westend_local_network.toml
@@ -29,13 +29,9 @@ name = "asset-hub-westend-collator1"
 rpc_port = 9011
 ws_port = 9944
 command = "{{POLKADOT_PARACHAIN_BINARY}}"
-args = [
-	"-lparachain=debug,runtime::revive=debug",
-]
+args = ["-lparachain=debug,runtime::revive=debug"]
 
 [[parachains.collators]]
 name = "asset-hub-westend-collator2"
 command = "{{POLKADOT_PARACHAIN_BINARY}}"
-args = [
-	"-lparachain=debug,runtime::revive=debug",
-]
+args = ["-lparachain=debug,runtime::revive=debug"]
diff --git a/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql b/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql
new file mode 100644
index 00000000000..43405bea9d0
--- /dev/null
+++ b/substrate/frame/revive/rpc/migrations/20241205165418_create_transaction_hashes.sql
@@ -0,0 +1,15 @@
+-- Create DB:
+-- DATABASE_URL="..." cargo sqlx database create
+--
+-- Run migration:
+-- DATABASE_URL="..." cargo sqlx migrate run
+--
+-- Update compile time artifacts:
+-- DATABASE_URL="..." cargo sqlx prepare
+CREATE TABLE transaction_hashes (
+  transaction_hash CHAR(64) NOT NULL PRIMARY KEY,
+  transaction_index INTEGER NOT NULL,
+  block_hash CHAR(64) NOT NULL
+);
+
+CREATE INDEX idx_block_hash ON transaction_hashes (block_hash);
diff --git a/substrate/frame/revive/rpc/src/block_info_provider.rs b/substrate/frame/revive/rpc/src/block_info_provider.rs
new file mode 100644
index 00000000000..0e91869cdda
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/block_info_provider.rs
@@ -0,0 +1,250 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+	client::{SubstrateBlock, SubstrateBlockNumber},
+	subxt_client::SrcChainConfig,
+	ClientError,
+};
+use jsonrpsee::core::async_trait;
+use sp_core::H256;
+use std::{
+	collections::{HashMap, VecDeque},
+	sync::Arc,
+};
+use subxt::{backend::legacy::LegacyRpcMethods, OnlineClient};
+use tokio::sync::RwLock;
+
+/// BlockInfoProvider caches and retrieves information about blocks.
+#[async_trait]
+pub trait BlockInfoProvider: Send + Sync {
+	/// Cache a new block and return the pruned block hash.
+	async fn cache_block(&self, block: SubstrateBlock) -> Option<H256>;
+
+	/// Return the latest ingested block.
+	async fn latest_block(&self) -> Option<Arc<SubstrateBlock>>;
+
+	/// Get block by block_number.
+	async fn block_by_number(
+		&self,
+		block_number: SubstrateBlockNumber,
+	) -> Result<Option<Arc<SubstrateBlock>>, ClientError>;
+
+	/// Get block by block hash.
+	async fn block_by_hash(&self, hash: &H256) -> Result<Option<Arc<SubstrateBlock>>, ClientError>;
+}
+
+/// Provides information about blocks.
+#[derive(Clone)]
+pub struct BlockInfoProviderImpl {
+	/// The shared in memory cache.
+	cache: Arc<RwLock<BlockCache<SubstrateBlock>>>,
+
+	/// The rpc client, used to fetch blocks not in the cache.
+	rpc: LegacyRpcMethods<SrcChainConfig>,
+
+	/// The api client, used to fetch blocks not in the cache.
+	api: OnlineClient<SrcChainConfig>,
+}
+
+impl BlockInfoProviderImpl {
+	pub fn new(
+		cache_size: usize,
+		api: OnlineClient<SrcChainConfig>,
+		rpc: LegacyRpcMethods<SrcChainConfig>,
+	) -> Self {
+		Self { api, rpc, cache: Arc::new(RwLock::new(BlockCache::new(cache_size))) }
+	}
+
+	async fn cache(&self) -> tokio::sync::RwLockReadGuard<'_, BlockCache<SubstrateBlock>> {
+		self.cache.read().await
+	}
+}
+
+#[async_trait]
+impl BlockInfoProvider for BlockInfoProviderImpl {
+	async fn cache_block(&self, block: SubstrateBlock) -> Option<H256> {
+		let mut cache = self.cache.write().await;
+		cache.insert(block)
+	}
+
+	async fn latest_block(&self) -> Option<Arc<SubstrateBlock>> {
+		let cache = self.cache().await;
+		cache.buffer.back().cloned()
+	}
+
+	async fn block_by_number(
+		&self,
+		block_number: SubstrateBlockNumber,
+	) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
+		let cache = self.cache().await;
+		if let Some(block) = cache.blocks_by_number.get(&block_number).cloned() {
+			return Ok(Some(block));
+		}
+
+		let Some(hash) = self.rpc.chain_get_block_hash(Some(block_number.into())).await? else {
+			return Ok(None);
+		};
+
+		self.block_by_hash(&hash).await
+	}
+
+	async fn block_by_hash(&self, hash: &H256) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
+		let cache = self.cache().await;
+		if let Some(block) = cache.blocks_by_hash.get(hash).cloned() {
+			return Ok(Some(block));
+		}
+
+		match self.api.blocks().at(*hash).await {
+			Ok(block) => Ok(Some(Arc::new(block))),
+			Err(subxt::Error::Block(subxt::error::BlockError::NotFound(_))) => Ok(None),
+			Err(err) => Err(err.into()),
+		}
+	}
+}
+
+/// The cache maintains a buffer of the last N blocks.
+struct BlockCache<Block> {
+	/// The maximum buffer's size.
+	max_cache_size: usize,
+
+	/// A double-ended queue of the last N blocks.
+	/// The most recent block is at the back of the queue, and the oldest block is at the front.
+	buffer: VecDeque<Arc<Block>>,
+
+	/// A map of blocks by block number.
+	blocks_by_number: HashMap<SubstrateBlockNumber, Arc<Block>>,
+
+	/// A map of blocks by block hash.
+	blocks_by_hash: HashMap<H256, Arc<Block>>,
+}
+
+/// Provides information about a block.
+/// This is an abstraction on top of [`SubstrateBlock`] used to test the [`BlockCache`].
+/// Can be removed once https://github.com/paritytech/subxt/issues/1883 is fixed.
+trait BlockInfo {
+	/// Returns the block hash.
+	fn hash(&self) -> H256;
+	/// Returns the block number.
+	fn number(&self) -> SubstrateBlockNumber;
+}
+
+impl BlockInfo for SubstrateBlock {
+	fn hash(&self) -> H256 {
+		SubstrateBlock::hash(self)
+	}
+	fn number(&self) -> u32 {
+		SubstrateBlock::number(self)
+	}
+}
+
+impl<B: BlockInfo> BlockCache<B> {
+	/// Create a new cache with the given maximum buffer size.
+	pub fn new(max_cache_size: usize) -> Self {
+		Self {
+			max_cache_size,
+			buffer: Default::default(),
+			blocks_by_number: Default::default(),
+			blocks_by_hash: Default::default(),
+		}
+	}
+
+	/// Insert an entry into the cache, and prune the oldest entry if the cache is full.
+	pub fn insert(&mut self, block: B) -> Option<H256> {
+		let mut pruned_block_hash = None;
+		if self.buffer.len() >= self.max_cache_size {
+			if let Some(block) = self.buffer.pop_front() {
+				let hash = block.hash();
+				self.blocks_by_hash.remove(&hash);
+				self.blocks_by_number.remove(&block.number());
+				pruned_block_hash = Some(hash);
+			}
+		}
+
+		let block = Arc::new(block);
+		self.buffer.push_back(block.clone());
+		self.blocks_by_number.insert(block.number(), block.clone());
+		self.blocks_by_hash.insert(block.hash(), block);
+		pruned_block_hash
+	}
+}
+
+#[cfg(test)]
+pub mod test {
+	use super::*;
+
+	struct MockBlock {
+		block_number: SubstrateBlockNumber,
+		block_hash: H256,
+	}
+
+	impl BlockInfo for MockBlock {
+		fn hash(&self) -> H256 {
+			self.block_hash
+		}
+
+		fn number(&self) -> u32 {
+			self.block_number
+		}
+	}
+
+	#[test]
+	fn cache_insert_works() {
+		let mut cache = BlockCache::<MockBlock>::new(2);
+
+		let pruned = cache.insert(MockBlock { block_number: 1, block_hash: H256::from([1; 32]) });
+		assert_eq!(pruned, None);
+
+		let pruned = cache.insert(MockBlock { block_number: 2, block_hash: H256::from([2; 32]) });
+		assert_eq!(pruned, None);
+
+		let pruned = cache.insert(MockBlock { block_number: 3, block_hash: H256::from([3; 32]) });
+		assert_eq!(pruned, Some(H256::from([1; 32])));
+
+		assert_eq!(cache.buffer.len(), 2);
+		assert_eq!(cache.blocks_by_number.len(), 2);
+		assert_eq!(cache.blocks_by_hash.len(), 2);
+	}
+
+	/// A Noop BlockInfoProvider used to test [`db::DBReceiptProvider`].
+	pub struct MockBlockInfoProvider;
+
+	#[async_trait]
+	impl BlockInfoProvider for MockBlockInfoProvider {
+		async fn cache_block(&self, _block: SubstrateBlock) -> Option<H256> {
+			None
+		}
+
+		async fn latest_block(&self) -> Option<Arc<SubstrateBlock>> {
+			None
+		}
+
+		async fn block_by_number(
+			&self,
+			_block_number: SubstrateBlockNumber,
+		) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
+			Ok(None)
+		}
+
+		async fn block_by_hash(
+			&self,
+			_hash: &H256,
+		) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
+			Ok(None)
+		}
+	}
+}
diff --git a/substrate/frame/revive/rpc/src/cli.rs b/substrate/frame/revive/rpc/src/cli.rs
index c0f81fcafd7..d63d596ab7a 100644
--- a/substrate/frame/revive/rpc/src/cli.rs
+++ b/substrate/frame/revive/rpc/src/cli.rs
@@ -16,7 +16,9 @@
 // limitations under the License.
 //! The Ethereum JSON-RPC server.
 use crate::{
-	client::Client, EthRpcServer, EthRpcServerImpl, SystemHealthRpcServer,
+	client::{connect, Client},
+	BlockInfoProvider, BlockInfoProviderImpl, CacheReceiptProvider, DBReceiptProvider,
+	EthRpcServer, EthRpcServerImpl, ReceiptProvider, SystemHealthRpcServer,
 	SystemHealthRpcServerImpl,
 };
 use clap::Parser;
@@ -27,6 +29,7 @@ use sc_service::{
 	config::{PrometheusConfig, RpcConfiguration},
 	start_rpc_servers, TaskManager,
 };
+use std::sync::Arc;
 
 // Default port if --prometheus-port is not specified
 const DEFAULT_PROMETHEUS_PORT: u16 = 9616;
@@ -42,6 +45,21 @@ pub struct CliCommand {
 	#[clap(long, default_value = "ws://127.0.0.1:9944")]
 	pub node_rpc_url: String,
 
+	/// The maximum number of blocks to cache in memory.
+	#[clap(long, default_value = "256")]
+	pub cache_size: usize,
+
+	/// The database used to store Ethereum transaction hashes.
+	/// This is only useful if the node needs to act as an archive node and respond to Ethereum RPC
+	/// queries for transactions that are not in the in memory cache.
+	#[clap(long)]
+	pub database_url: Option<String>,
+
+	/// If true, we will only read from the database and not write to it.
+	/// Only useful if `--database-url` is specified.
+	#[clap(long, default_value = "true")]
+	pub database_read_only: bool,
+
 	#[allow(missing_docs)]
 	#[clap(flatten)]
 	pub shared_params: SharedParams,
@@ -78,7 +96,16 @@ fn init_logger(params: &SharedParams) -> anyhow::Result<()> {
 
 /// Start the JSON-RPC server using the given command line arguments.
 pub fn run(cmd: CliCommand) -> anyhow::Result<()> {
-	let CliCommand { rpc_params, prometheus_params, node_rpc_url, shared_params, .. } = cmd;
+	let CliCommand {
+		rpc_params,
+		prometheus_params,
+		node_rpc_url,
+		cache_size,
+		database_url,
+		database_read_only,
+		shared_params,
+		..
+	} = cmd;
 
 	#[cfg(not(test))]
 	init_logger(&shared_params)?;
@@ -110,19 +137,42 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> {
 
 	let tokio_runtime = sc_cli::build_runtime()?;
 	let tokio_handle = tokio_runtime.handle();
-	let signals = tokio_runtime.block_on(async { Signals::capture() })?;
 	let mut task_manager = TaskManager::new(tokio_handle.clone(), prometheus_registry)?;
 	let essential_spawn_handle = task_manager.spawn_essential_handle();
 
 	let gen_rpc_module = || {
 		let signals = tokio_runtime.block_on(async { Signals::capture() })?;
-		let fut = Client::from_url(&node_rpc_url, &essential_spawn_handle).fuse();
+		let fut = async {
+			let (api, rpc_client, rpc) = connect(&node_rpc_url).await?;
+			let block_provider: Arc<dyn BlockInfoProvider> =
+				Arc::new(BlockInfoProviderImpl::new(cache_size, api.clone(), rpc.clone()));
+			let receipt_provider: Arc<dyn ReceiptProvider> =
+				if let Some(database_url) = database_url.as_ref() {
+					Arc::new((
+						CacheReceiptProvider::default(),
+						DBReceiptProvider::new(
+							database_url,
+							database_read_only,
+							block_provider.clone(),
+						)
+						.await?,
+					))
+				} else {
+					Arc::new(CacheReceiptProvider::default())
+				};
+
+			let client =
+				Client::new(api, rpc_client, rpc, block_provider, receipt_provider).await?;
+			client.subscribe_and_cache_blocks(&essential_spawn_handle);
+			Ok::<_, crate::ClientError>(client)
+		}
+		.fuse();
 		pin_mut!(fut);
 
 		match tokio_handle.block_on(signals.try_until_signal(fut)) {
 			Ok(Ok(client)) => rpc_module(is_dev, client),
 			Ok(Err(err)) => {
-				log::error!("Error connecting to the node at {node_rpc_url}: {err}");
+				log::error!("Error initializing: {err:?}");
 				Err(sc_service::Error::Application(err.into()))
 			},
 			Err(_) => Err(sc_service::Error::Application("Client connection interrupted".into())),
@@ -142,6 +192,7 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> {
 		start_rpc_servers(&rpc_config, prometheus_registry, tokio_handle, gen_rpc_module, None)?;
 
 	task_manager.keep_alive(rpc_server_handle);
+	let signals = tokio_runtime.block_on(async { Signals::capture() })?;
 	tokio_runtime.block_on(signals.run_until_signal(task_manager.future().fuse()))?;
 	Ok(())
 }
diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs
index de97844eccb..cd0effe7faf 100644
--- a/substrate/frame/revive/rpc/src/client.rs
+++ b/substrate/frame/revive/rpc/src/client.rs
@@ -17,30 +17,23 @@
 //! The client connects to the source substrate chain
 //! and is used by the rpc server to query and send transactions to the substrate chain.
 use crate::{
+	extract_receipts_from_block,
 	runtime::gas_from_fee,
 	subxt_client::{
-		revive::{calls::types::EthTransact, events::ContractEmitted},
-		runtime_types::pallet_revive::storage::ContractInfo,
+		revive::calls::types::EthTransact, runtime_types::pallet_revive::storage::ContractInfo,
 	},
-	LOG_TARGET,
+	BlockInfoProvider, ReceiptProvider, TransactionInfo, LOG_TARGET,
 };
-use futures::{stream, StreamExt};
 use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned};
 use pallet_revive::{
-	create1,
 	evm::{
-		Block, BlockNumberOrTag, BlockNumberOrTagOrHash, Bytes256, GenericTransaction, Log,
-		ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256,
+		Block, BlockNumberOrTag, BlockNumberOrTagOrHash, GenericTransaction, ReceiptInfo,
+		SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256,
 	},
 	EthTransactError, EthTransactInfo,
 };
-use sp_core::keccak_256;
 use sp_weights::Weight;
-use std::{
-	collections::{HashMap, VecDeque},
-	sync::Arc,
-	time::Duration,
-};
+use std::{ops::ControlFlow, sync::Arc, time::Duration};
 use subxt::{
 	backend::{
 		legacy::{rpc_methods::SystemHealth, LegacyRpcMethods},
@@ -54,11 +47,10 @@ use subxt::{
 	storage::Storage,
 	Config, OnlineClient,
 };
-use subxt_client::transaction_payment::events::TransactionFeePaid;
 use thiserror::Error;
-use tokio::sync::{watch::Sender, RwLock};
+use tokio::{sync::RwLock, try_join};
 
-use crate::subxt_client::{self, system::events::ExtrinsicSuccess, SrcChainConfig};
+use crate::subxt_client::{self, SrcChainConfig};
 
 /// The substrate block type.
 pub type SubstrateBlock = subxt::blocks::Block<SrcChainConfig, OnlineClient<SrcChainConfig>>;
@@ -75,29 +67,6 @@ pub type Shared<T> = Arc<RwLock<T>>;
 /// The runtime balance type.
 pub type Balance = u128;
 
-/// The cache maintains a buffer of the last N blocks,
-#[derive(Default)]
-struct BlockCache<const N: usize> {
-	/// A double-ended queue of the last N blocks.
-	/// The most recent block is at the back of the queue, and the oldest block is at the front.
-	buffer: VecDeque<Arc<SubstrateBlock>>,
-
-	/// A map of blocks by block number.
-	blocks_by_number: HashMap<SubstrateBlockNumber, Arc<SubstrateBlock>>,
-
-	/// A map of blocks by block hash.
-	blocks_by_hash: HashMap<H256, Arc<SubstrateBlock>>,
-
-	/// A map of receipts by hash.
-	receipts_by_hash: HashMap<H256, ReceiptInfo>,
-
-	/// A map of Signed transaction by hash.
-	signed_tx_by_hash: HashMap<H256, TransactionSigned>,
-
-	/// A map of receipt hashes by block hash.
-	tx_hashes_by_block_and_index: HashMap<H256, HashMap<U256, H256>>,
-}
-
 /// Unwrap the original `jsonrpsee::core::client::Error::Call` error.
 fn unwrap_call_err(err: &subxt::error::RpcError) -> Option<ErrorObjectOwned> {
 	use subxt::backend::rpc::reconnecting_rpc_client;
@@ -167,6 +136,9 @@ pub enum ClientError {
 	/// A [`RpcError`] wrapper error.
 	#[error(transparent)]
 	RpcError(#[from] RpcError),
+	/// A [`sqlx::Error`] wrapper error.
+	#[error(transparent)]
+	SqlxError(#[from] sqlx::Error),
 	/// A [`codec::Error`] wrapper error.
 	#[error(transparent)]
 	CodecError(#[from] codec::Error),
@@ -179,9 +151,18 @@ pub enum ClientError {
 	/// The block hash was not found.
 	#[error("hash not found")]
 	BlockNotFound,
+
+	#[error("No Ethereum extrinsic found")]
+	EthExtrinsicNotFound,
 	/// The transaction fee could not be found
 	#[error("transactionFeePaid event not found")]
 	TxFeeNotFound,
+	/// Failed to decode a raw payload into a signed transaction.
+	#[error("Failed to decode a raw payload into a signed transaction")]
+	TxDecodingFailed,
+	/// Failed to recover eth address.
+	#[error("failed to recover eth address")]
+	RecoverEthAddressFailed,
 	/// The cache is empty.
 	#[error("cache is empty")]
 	CacheEmpty,
@@ -214,163 +195,18 @@ impl From<ClientError> for ErrorObjectOwned {
 	}
 }
 
-/// The number of recent blocks maintained by the cache.
-/// For each block in the cache, we also store the EVM transaction receipts.
-pub const CACHE_SIZE: usize = 256;
-
-impl<const N: usize> BlockCache<N> {
-	fn latest_block(&self) -> Option<&Arc<SubstrateBlock>> {
-		self.buffer.back()
-	}
-
-	/// Insert an entry into the cache, and prune the oldest entry if the cache is full.
-	fn insert(&mut self, block: SubstrateBlock) {
-		if self.buffer.len() >= N {
-			if let Some(block) = self.buffer.pop_front() {
-				log::trace!(target: LOG_TARGET, "Pruning block: {}", block.number());
-				let hash = block.hash();
-				self.blocks_by_hash.remove(&hash);
-				self.blocks_by_number.remove(&block.number());
-				if let Some(entries) = self.tx_hashes_by_block_and_index.remove(&hash) {
-					for hash in entries.values() {
-						self.receipts_by_hash.remove(hash);
-					}
-				}
-			}
-		}
-
-		let block = Arc::new(block);
-		self.buffer.push_back(block.clone());
-		self.blocks_by_number.insert(block.number(), block.clone());
-		self.blocks_by_hash.insert(block.hash(), block);
-	}
-}
-
 /// A client connect to a node and maintains a cache of the last `CACHE_SIZE` blocks.
 #[derive(Clone)]
 pub struct Client {
-	/// The inner state of the client.
-	inner: Arc<ClientInner>,
-	/// A watch channel to signal cache updates.
-	pub updates: tokio::sync::watch::Receiver<()>,
-}
-
-/// The inner state of the client.
-struct ClientInner {
 	api: OnlineClient<SrcChainConfig>,
 	rpc_client: ReconnectingRpcClient,
 	rpc: LegacyRpcMethods<SrcChainConfig>,
-	cache: Shared<BlockCache<CACHE_SIZE>>,
+	receipt_provider: Arc<dyn ReceiptProvider>,
+	block_provider: Arc<dyn BlockInfoProvider>,
 	chain_id: u64,
 	max_block_weight: Weight,
 }
 
-impl ClientInner {
-	/// Create a new client instance connecting to the substrate node at the given URL.
-	async fn from_url(url: &str) -> Result<Self, ClientError> {
-		let rpc_client = ReconnectingRpcClient::builder()
-			.retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10)))
-			.build(url.to_string())
-			.await?;
-
-		let api = OnlineClient::<SrcChainConfig>::from_rpc_client(rpc_client.clone()).await?;
-		let cache = Arc::new(RwLock::new(BlockCache::<CACHE_SIZE>::default()));
-
-		let rpc = LegacyRpcMethods::<SrcChainConfig>::new(RpcClient::new(rpc_client.clone()));
-
-		let (chain_id, max_block_weight) =
-			tokio::try_join!(chain_id(&api), max_block_weight(&api))?;
-
-		Ok(Self { api, rpc_client, rpc, cache, chain_id, max_block_weight })
-	}
-
-	/// Get the receipt infos from the extrinsics in a block.
-	async fn receipt_infos(
-		&self,
-		block: &SubstrateBlock,
-	) -> Result<HashMap<H256, (TransactionSigned, ReceiptInfo)>, ClientError> {
-		// Get extrinsics from the block
-		let extrinsics = block.extrinsics().await?;
-
-		// Filter extrinsics from pallet_revive
-		let extrinsics = extrinsics.iter().flat_map(|ext| {
-			let call = ext.as_extrinsic::<EthTransact>().ok()??;
-			let transaction_hash = H256(keccak_256(&call.payload));
-			let signed_tx = TransactionSigned::decode(&call.payload).ok()?;
-			let from = signed_tx.recover_eth_address().ok()?;
-			let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from));
-			let contract_address = if tx_info.to.is_none() {
-				Some(create1(&from, tx_info.nonce.unwrap_or_default().try_into().ok()?))
-			} else {
-				None
-			};
-
-			Some((from, signed_tx, tx_info, transaction_hash, contract_address, ext))
-		});
-
-		// Map each extrinsic to a receipt
-		stream::iter(extrinsics)
-			.map(|(from, signed_tx, tx_info, transaction_hash, contract_address, ext)| async move {
-				let events = ext.events().await?;
-				let tx_fees =
-					events.find_first::<TransactionFeePaid>()?.ok_or(ClientError::TxFeeNotFound)?;
-
-				let gas_price = tx_info.gas_price.unwrap_or_default();
-				let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee))
-					.checked_div(gas_price.as_u128())
-					.unwrap_or_default();
-
-				let success = events.has::<ExtrinsicSuccess>()?;
-				let transaction_index = ext.index();
-				let block_hash = block.hash();
-				let block_number = block.number().into();
-
-				// get logs from ContractEmitted event
-				let logs = events.iter()
-					.filter_map(|event_details| {
-						let event_details = event_details.ok()?;
-						let event = event_details.as_event::<ContractEmitted>().ok()??;
-
-						Some(Log {
-							address: event.contract,
-							topics: event.topics,
-							data: Some(event.data.into()),
-							block_number: Some(block_number),
-							transaction_hash,
-							transaction_index: Some(transaction_index.into()),
-							block_hash: Some(block_hash),
-							log_index: Some(event_details.index().into()),
-							..Default::default()
-						})
-					}).collect();
-
-
-				log::debug!(target: LOG_TARGET, "Adding receipt for tx hash: {transaction_hash:?} - block: {block_number:?}");
-				let receipt = ReceiptInfo::new(
-					block_hash,
-					block_number,
-					contract_address,
-					from,
-					logs,
-					tx_info.to,
-					gas_price,
-					gas_used.into(),
-					success,
-					transaction_hash,
-					transaction_index.into(),
-					tx_info.r#type.unwrap_or_default()
-				);
-
-				Ok::<_, ClientError>((receipt.transaction_hash, (signed_tx, receipt)))
-			})
-			.buffer_unordered(10)
-			.collect::<Vec<Result<_, _>>>()
-			.await
-			.into_iter()
-			.collect::<Result<HashMap<_, _>, _>>()
-	}
-}
-
 /// Fetch the chain ID from the substrate chain.
 async fn chain_id(api: &OnlineClient<SrcChainConfig>) -> Result<u64, ClientError> {
 	let query = subxt_client::constants().revive().chain_id();
@@ -395,23 +231,181 @@ async fn extract_block_timestamp(block: &SubstrateBlock) -> Option<u64> {
 	Some(ext.value.now / 1000)
 }
 
+/// Connect to a node at the given URL, and return the underlying API, RPC client, and legacy RPC
+/// clients.
+pub async fn connect(
+	node_rpc_url: &str,
+) -> Result<
+	(OnlineClient<SrcChainConfig>, ReconnectingRpcClient, LegacyRpcMethods<SrcChainConfig>),
+	ClientError,
+> {
+	log::info!(target: LOG_TARGET, "Connecting to node at: {node_rpc_url} ...");
+	let rpc_client = ReconnectingRpcClient::builder()
+		.retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10)))
+		.build(node_rpc_url.to_string())
+		.await?;
+	log::info!(target: LOG_TARGET, "Connected to node at: {node_rpc_url}");
+
+	let api = OnlineClient::<SrcChainConfig>::from_rpc_client(rpc_client.clone()).await?;
+	let rpc = LegacyRpcMethods::<SrcChainConfig>::new(RpcClient::new(rpc_client.clone()));
+	Ok((api, rpc_client, rpc))
+}
+
 impl Client {
 	/// Create a new client instance.
-	/// The client will subscribe to new blocks and maintain a cache of [`CACHE_SIZE`] blocks.
-	pub async fn from_url(
-		url: &str,
-		spawn_handle: &sc_service::SpawnEssentialTaskHandle,
+	pub async fn new(
+		api: OnlineClient<SrcChainConfig>,
+		rpc_client: ReconnectingRpcClient,
+		rpc: LegacyRpcMethods<SrcChainConfig>,
+		block_provider: Arc<dyn BlockInfoProvider>,
+		receipt_provider: Arc<dyn ReceiptProvider>,
 	) -> Result<Self, ClientError> {
-		log::info!(target: LOG_TARGET, "Connecting to node at: {url} ...");
-		let inner: Arc<ClientInner> = Arc::new(ClientInner::from_url(url).await?);
-		log::info!(target: LOG_TARGET, "Connected to node at: {url}");
+		let (chain_id, max_block_weight) =
+			tokio::try_join!(chain_id(&api), max_block_weight(&api))?;
 
-		let (tx, mut updates) = tokio::sync::watch::channel(());
+		Ok(Self {
+			api,
+			rpc_client,
+			rpc,
+			receipt_provider,
+			block_provider,
+			chain_id,
+			max_block_weight,
+		})
+	}
 
-		spawn_handle.spawn("subscribe-blocks", None, Self::subscribe_blocks(inner.clone(), tx));
+	/// Subscribe to past blocks executing the callback for each block.
+	/// The subscription continues iterating past blocks until the closure returns
+	/// `ControlFlow::Break`. Blocks are iterated starting from the latest block and moving
+	/// backward.
+	#[allow(dead_code)]
+	async fn subscribe_past_blocks<F, Fut>(&self, callback: F) -> Result<(), ClientError>
+	where
+		F: Fn(SubstrateBlock) -> Fut + Send + Sync,
+		Fut: std::future::Future<Output = Result<ControlFlow<()>, ClientError>> + Send,
+	{
+		log::info!(target: LOG_TARGET, "Subscribing to past blocks");
+		let mut block = self.api.blocks().at_latest().await.inspect_err(|err| {
+			log::error!(target: LOG_TARGET, "Failed to fetch latest block: {err:?}");
+		})?;
+
+		loop {
+			let block_number = block.number();
+			log::debug!(target: LOG_TARGET, "Processing block {block_number}");
+
+			let parent_hash = block.header().parent_hash;
+			let control_flow = callback(block).await.inspect_err(|err| {
+				log::error!(target: LOG_TARGET, "Failed to process block {block_number}: {err:?}");
+			})?;
+
+			match control_flow {
+				ControlFlow::Continue(_) => {
+					if block_number == 0 {
+						log::info!(target: LOG_TARGET, "All past blocks processed");
+						return Ok(());
+					}
+					block = self.api.blocks().at(parent_hash).await.inspect_err(|err| {
+						log::error!(target: LOG_TARGET, "Failed to fetch block at {parent_hash:?}: {err:?}");
+					})?;
+				},
+				ControlFlow::Break(_) => {
+					log::info!(target: LOG_TARGET, "Stopping past block subscription at {block_number}");
+					return Ok(());
+				},
+			}
+		}
+	}
+
+	/// Subscribe to new best blocks, and execute the async closure with
+	/// the extracted block and ethereum transactions
+	async fn subscribe_new_blocks<F, Fut>(&self, callback: F) -> Result<(), ClientError>
+	where
+		F: Fn(SubstrateBlock) -> Fut + Send + Sync,
+		Fut: std::future::Future<Output = Result<(), ClientError>> + Send,
+	{
+		log::info!(target: LOG_TARGET, "Subscribing to new blocks");
+		let mut block_stream = match self.api.blocks().subscribe_best().await {
+			Ok(s) => s,
+			Err(err) => {
+				log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}");
+				return Err(err.into());
+			},
+		};
+
+		while let Some(block) = block_stream.next().await {
+			let block = match block {
+				Ok(block) => block,
+				Err(err) => {
+					if err.is_disconnected_will_reconnect() {
+						log::warn!(
+							target: LOG_TARGET,
+							"The RPC connection was lost and we may have missed a few blocks"
+						);
+						continue;
+					}
+
+					log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}");
+					return Err(err.into());
+				},
+			};
+
+			log::debug!(target: LOG_TARGET, "Pushing block: {}", block.number());
+			callback(block).await?;
+		}
 
-		updates.changed().await.expect("tx is not dropped");
-		Ok(Self { inner, updates })
+		log::info!(target: LOG_TARGET, "Block subscription ended");
+		Ok(())
+	}
+
+	/// Start the block subscription, and populate the block cache.
+	pub fn subscribe_and_cache_blocks(&self, spawn_handle: &sc_service::SpawnEssentialTaskHandle) {
+		let client = self.clone();
+		spawn_handle.spawn("subscribe-blocks", None, async move {
+			let res = client
+				.subscribe_new_blocks(|block| async {
+					let receipts = extract_receipts_from_block(&block).await?;
+
+					client.receipt_provider.insert(&block.hash(), &receipts).await;
+					if let Some(pruned) = client.block_provider.cache_block(block).await {
+						client.receipt_provider.remove(&pruned).await;
+					}
+
+					Ok(())
+				})
+				.await;
+
+			if let Err(err) = res {
+				log::error!(target: LOG_TARGET, "Block subscription error: {err:?}");
+			}
+		});
+	}
+
+	/// Start the block subscription, and populate the receipt cache.
+	pub async fn subscribe_and_cache_receipts(
+		&self,
+		oldest_block: Option<SubstrateBlockNumber>,
+	) -> Result<(), ClientError> {
+		let new_blocks_fut = self.subscribe_new_blocks(|block| async move {
+			let receipts = extract_receipts_from_block(&block).await.inspect_err(|err| {
+				log::error!(target: LOG_TARGET, "Failed to extract receipts from block: {err:?}");
+			})?;
+			self.receipt_provider.insert(&block.hash(), &receipts).await;
+			Ok(())
+		});
+
+		let Some(oldest_block) = oldest_block else { return new_blocks_fut.await };
+
+		let old_blocks_fut = self.subscribe_past_blocks(|block| async move {
+			let receipts = extract_receipts_from_block(&block).await?;
+			self.receipt_provider.insert(&block.hash(), &receipts).await;
+			if block.number() == oldest_block {
+				Ok(ControlFlow::Break(()))
+			} else {
+				Ok(ControlFlow::Continue(()))
+			}
+		});
+
+		try_join!(new_blocks_fut, old_blocks_fut).map(|_| ())
 	}
 
 	/// Expose the storage API.
@@ -425,14 +419,14 @@ impl Client {
 					(*block_number).try_into().map_err(|_| ClientError::ConversionFailed)?;
 
 				let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?;
-				Ok(self.inner.api.storage().at(hash))
+				Ok(self.api.storage().at(hash))
 			},
-			BlockNumberOrTagOrHash::H256(hash) => Ok(self.inner.api.storage().at(*hash)),
+			BlockNumberOrTagOrHash::H256(hash) => Ok(self.api.storage().at(*hash)),
 			BlockNumberOrTagOrHash::BlockTag(_) => {
 				if let Some(block) = self.latest_block().await {
-					return Ok(self.inner.api.storage().at(block.hash()));
+					return Ok(self.api.storage().at(block.hash()));
 				}
-				let storage = self.inner.api.storage().at_latest().await?;
+				let storage = self.api.storage().at_latest().await?;
 				Ok(storage)
 			},
 		}
@@ -452,90 +446,24 @@ impl Client {
 					(*block_number).try_into().map_err(|_| ClientError::ConversionFailed)?;
 
 				let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?;
-				Ok(self.inner.api.runtime_api().at(hash))
+				Ok(self.api.runtime_api().at(hash))
 			},
-			BlockNumberOrTagOrHash::H256(hash) => Ok(self.inner.api.runtime_api().at(*hash)),
+			BlockNumberOrTagOrHash::H256(hash) => Ok(self.api.runtime_api().at(*hash)),
 			BlockNumberOrTagOrHash::BlockTag(_) => {
 				if let Some(block) = self.latest_block().await {
-					return Ok(self.inner.api.runtime_api().at(block.hash()));
+					return Ok(self.api.runtime_api().at(block.hash()));
 				}
 
-				let api = self.inner.api.runtime_api().at_latest().await?;
+				let api = self.api.runtime_api().at_latest().await?;
 				Ok(api)
 			},
 		}
 	}
 
-	/// Subscribe to new blocks and update the cache.
-	async fn subscribe_blocks(inner: Arc<ClientInner>, tx: Sender<()>) {
-		log::info!(target: LOG_TARGET, "Subscribing to new blocks");
-		let mut block_stream = match inner.as_ref().api.blocks().subscribe_best().await {
-			Ok(s) => s,
-			Err(err) => {
-				log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}");
-				return;
-			},
-		};
-
-		while let Some(block) = block_stream.next().await {
-			let block = match block {
-				Ok(block) => block,
-				Err(err) => {
-					if err.is_disconnected_will_reconnect() {
-						log::warn!(
-							target: LOG_TARGET,
-							"The RPC connection was lost and we may have missed a few blocks"
-						);
-						continue;
-					}
-
-					log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}");
-					return;
-				},
-			};
-
-			log::trace!(target: LOG_TARGET, "Pushing block: {}", block.number());
-			let mut cache = inner.cache.write().await;
-
-			let receipts = inner
-				.receipt_infos(&block)
-				.await
-				.inspect_err(|err| {
-					log::error!(target: LOG_TARGET, "Failed to get receipts: {err:?}");
-				})
-				.unwrap_or_default();
-
-			if !receipts.is_empty() {
-				let values = receipts
-					.iter()
-					.map(|(hash, (_, receipt))| (receipt.transaction_index, *hash))
-					.collect::<HashMap<_, _>>();
-
-				cache.tx_hashes_by_block_and_index.insert(block.hash(), values);
-
-				cache
-					.receipts_by_hash
-					.extend(receipts.iter().map(|(hash, (_, receipt))| (*hash, receipt.clone())));
-
-				cache.signed_tx_by_hash.extend(
-					receipts.iter().map(|(hash, (signed_tx, _))| (*hash, signed_tx.clone())),
-				)
-			}
-
-			cache.insert(block);
-			tx.send_replace(());
-		}
-
-		log::info!(target: LOG_TARGET, "Block subscription ended");
-	}
-}
-
-impl Client {
 	/// Get the most recent block stored in the cache.
 	pub async fn latest_block(&self) -> Option<Arc<SubstrateBlock>> {
-		let cache = self.inner.cache.read().await;
-		let block = cache.latest_block()?;
-		Some(block.clone())
+		let block = self.block_provider.latest_block().await?;
+		Some(block)
 	}
 
 	/// Expose the transaction API.
@@ -543,23 +471,22 @@ impl Client {
 		&self,
 		call: subxt::tx::DefaultPayload<EthTransact>,
 	) -> Result<H256, ClientError> {
-		let ext = self.inner.api.tx().create_unsigned(&call).map_err(ClientError::from)?;
+		let ext = self.api.tx().create_unsigned(&call).map_err(ClientError::from)?;
 		let hash = ext.submit().await?;
 		Ok(hash)
 	}
 
 	/// Get an EVM transaction receipt by hash.
 	pub async fn receipt(&self, tx_hash: &H256) -> Option<ReceiptInfo> {
-		let cache = self.inner.cache.read().await;
-		cache.receipts_by_hash.get(tx_hash).cloned()
+		self.receipt_provider.receipt_by_hash(tx_hash).await
 	}
 
 	/// Get the syncing status of the chain.
 	pub async fn syncing(&self) -> Result<SyncingStatus, ClientError> {
-		let health = self.inner.rpc.system_health().await?;
+		let health = self.rpc.system_health().await?;
 
 		let status = if health.is_syncing {
-			let client = RpcClient::new(self.inner.rpc_client.clone());
+			let client = RpcClient::new(self.rpc_client.clone());
 			let sync_state: sc_rpc::system::SyncState<SubstrateBlockNumber> =
 				client.request("system_syncState", Default::default()).await?;
 
@@ -582,27 +509,23 @@ impl Client {
 		block_hash: &H256,
 		transaction_index: &U256,
 	) -> Option<ReceiptInfo> {
-		let cache = self.inner.cache.read().await;
-		let receipt_hash =
-			cache.tx_hashes_by_block_and_index.get(block_hash)?.get(transaction_index)?;
-		let receipt = cache.receipts_by_hash.get(receipt_hash)?;
-		Some(receipt.clone())
+		self.receipt_provider
+			.receipt_by_block_hash_and_index(block_hash, transaction_index)
+			.await
 	}
 
 	pub async fn signed_tx_by_hash(&self, tx_hash: &H256) -> Option<TransactionSigned> {
-		let cache = self.inner.cache.read().await;
-		cache.signed_tx_by_hash.get(tx_hash).cloned()
+		self.receipt_provider.signed_tx_by_hash(tx_hash).await
 	}
 
 	/// Get receipts count per block.
 	pub async fn receipts_count_per_block(&self, block_hash: &SubstrateBlockHash) -> Option<usize> {
-		let cache = self.inner.cache.read().await;
-		cache.tx_hashes_by_block_and_index.get(block_hash).map(|v| v.len())
+		self.receipt_provider.receipts_count_per_block(block_hash).await
 	}
 
 	/// Get the system health.
 	pub async fn system_health(&self) -> Result<SystemHealth, ClientError> {
-		let health = self.inner.rpc.system_health().await?;
+		let health = self.rpc.system_health().await?;
 		Ok(health)
 	}
 
@@ -697,8 +620,8 @@ impl Client {
 
 	/// Get the block number of the latest block.
 	pub async fn block_number(&self) -> Result<SubstrateBlockNumber, ClientError> {
-		let cache = self.inner.cache.read().await;
-		let latest_block = cache.buffer.back().ok_or(ClientError::CacheEmpty)?;
+		let latest_block =
+			self.block_provider.latest_block().await.ok_or(ClientError::CacheEmpty)?;
 		Ok(latest_block.number())
 	}
 
@@ -707,13 +630,8 @@ impl Client {
 		&self,
 		block_number: SubstrateBlockNumber,
 	) -> Result<Option<SubstrateBlockHash>, ClientError> {
-		let cache = self.inner.cache.read().await;
-		if let Some(block) = cache.blocks_by_number.get(&block_number) {
-			return Ok(Some(block.hash()));
-		}
-
-		let hash = self.inner.rpc.chain_get_block_hash(Some(block_number.into())).await?;
-		Ok(hash)
+		let maybe_block = self.block_provider.block_by_number(block_number).await?;
+		Ok(maybe_block.map(|block| block.hash()))
 	}
 
 	/// Get a block for the specified hash or number.
@@ -727,8 +645,8 @@ impl Client {
 				self.block_by_number(n).await
 			},
 			BlockNumberOrTag::BlockTag(_) => {
-				let cache = self.inner.cache.read().await;
-				Ok(cache.buffer.back().cloned())
+				let block = self.block_provider.latest_block().await;
+				Ok(block)
 			},
 		}
 	}
@@ -738,16 +656,7 @@ impl Client {
 		&self,
 		hash: &SubstrateBlockHash,
 	) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
-		let cache = self.inner.cache.read().await;
-		if let Some(block) = cache.blocks_by_hash.get(hash) {
-			return Ok(Some(block.clone()));
-		}
-
-		match self.inner.api.blocks().at(*hash).await {
-			Ok(block) => Ok(Some(Arc::new(block))),
-			Err(subxt::Error::Block(subxt::error::BlockError::NotFound(_))) => Ok(None),
-			Err(err) => Err(err.into()),
-		}
+		self.block_provider.block_by_hash(hash).await
 	}
 
 	/// Get a block by number
@@ -755,21 +664,16 @@ impl Client {
 		&self,
 		block_number: SubstrateBlockNumber,
 	) -> Result<Option<Arc<SubstrateBlock>>, ClientError> {
-		let cache = self.inner.cache.read().await;
-		if let Some(block) = cache.blocks_by_number.get(&block_number) {
-			return Ok(Some(block.clone()));
-		}
-
-		let Some(hash) = self.get_block_hash(block_number).await? else {
-			return Ok(None);
-		};
-
-		self.block_by_hash(&hash).await
+		self.block_provider.block_by_number(block_number).await
 	}
 
 	/// Get the EVM block for the given hash.
-	pub async fn evm_block(&self, block: Arc<SubstrateBlock>) -> Result<Block, ClientError> {
-		let runtime_api = self.inner.api.runtime_api().at(block.hash());
+	pub async fn evm_block(
+		&self,
+		block: Arc<SubstrateBlock>,
+		hydrated_transactions: bool,
+	) -> Result<Block, ClientError> {
+		let runtime_api = self.api.runtime_api().at(block.hash());
 		let max_fee = Self::weight_to_fee(&runtime_api, self.max_block_weight()).await?;
 		let gas_limit = gas_from_fee(max_fee);
 
@@ -781,6 +685,23 @@ impl Client {
 		let state_root = header.state_root.0.into();
 		let extrinsics_root = header.extrinsics_root.0.into();
 
+		let receipts = extract_receipts_from_block(&block).await?;
+		let gas_used =
+			receipts.iter().fold(U256::zero(), |acc, (_, receipt)| acc + receipt.gas_used);
+		let transactions = if hydrated_transactions {
+			receipts
+				.into_iter()
+				.map(|(signed_tx, receipt)| TransactionInfo::new(receipt, signed_tx))
+				.collect::<Vec<TransactionInfo>>()
+				.into()
+		} else {
+			receipts
+				.into_iter()
+				.map(|(_, receipt)| receipt.transaction_hash)
+				.collect::<Vec<_>>()
+				.into()
+		};
+
 		Ok(Block {
 			hash: block.hash(),
 			parent_hash,
@@ -789,9 +710,11 @@ impl Client {
 			number: header.number.into(),
 			timestamp: timestamp.into(),
 			difficulty: Some(0u32.into()),
+			base_fee_per_gas: Some(crate::GAS_PRICE.into()),
 			gas_limit,
-			logs_bloom: Bytes256([0u8; 256]),
+			gas_used,
 			receipts_root: extrinsics_root,
+			transactions,
 			..Default::default()
 		})
 	}
@@ -811,11 +734,11 @@ impl Client {
 
 	/// Get the chain ID.
 	pub fn chain_id(&self) -> u64 {
-		self.inner.chain_id
+		self.chain_id
 	}
 
 	/// Get the Max Block Weight.
 	pub fn max_block_weight(&self) -> Weight {
-		self.inner.max_block_weight
+		self.max_block_weight
 	}
 }
diff --git a/substrate/frame/revive/rpc/src/eth-indexer.rs b/substrate/frame/revive/rpc/src/eth-indexer.rs
new file mode 100644
index 00000000000..3e7f6b6fa91
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/eth-indexer.rs
@@ -0,0 +1,88 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//! The Ethereum indexer binary: subscribes to new blocks and caches their transaction receipts in a database.
+use clap::Parser;
+use pallet_revive_eth_rpc::{
+	client::{connect, Client, SubstrateBlockNumber},
+	BlockInfoProvider, BlockInfoProviderImpl, DBReceiptProvider, ReceiptProvider,
+};
+use sc_cli::SharedParams;
+use std::sync::Arc;
+
+// Parsed command instructions from the command line
+#[derive(Parser, Debug)]
+#[clap(author, about, version)]
+pub struct CliCommand {
+	/// The node url to connect to
+	#[clap(long, default_value = "ws://127.0.0.1:9944")]
+	pub node_rpc_url: String,
+
+	/// Specifies the block number to start indexing from, going backwards from the current block.
+	/// If not provided, only new blocks will be indexed
+	#[clap(long)]
+	pub oldest_block: Option<SubstrateBlockNumber>,
+
+	/// The database used to store Ethereum transaction hashes.
+	#[clap(long)]
+	pub database_url: String,
+
+	#[allow(missing_docs)]
+	#[clap(flatten)]
+	pub shared_params: SharedParams,
+}
+
+/// Initialize the logger
+#[cfg(not(test))]
+fn init_logger(params: &SharedParams) -> anyhow::Result<()> {
+	let mut logger = sc_cli::LoggerBuilder::new(params.log_filters().join(","));
+	logger
+		.with_log_reloading(params.enable_log_reloading)
+		.with_detailed_output(params.detailed_log_output);
+
+	if let Some(tracing_targets) = &params.tracing_targets {
+		let tracing_receiver = params.tracing_receiver.into();
+		logger.with_profiling(tracing_receiver, tracing_targets);
+	}
+
+	if params.disable_log_color {
+		logger.with_colors(false);
+	}
+
+	logger.init()?;
+	Ok(())
+}
+
+#[tokio::main]
+pub async fn main() -> anyhow::Result<()> {
+	let CliCommand {
+		node_rpc_url, database_url, shared_params: _shared_params, oldest_block, ..
+	} = CliCommand::parse();
+
+	#[cfg(not(test))]
+	init_logger(&_shared_params)?;
+
+	let (api, rpc_client, rpc) = connect(&node_rpc_url).await?;
+	let block_provider: Arc<dyn BlockInfoProvider> =
+		Arc::new(BlockInfoProviderImpl::new(0, api.clone(), rpc.clone()));
+	let receipt_provider: Arc<dyn ReceiptProvider> =
+		Arc::new(DBReceiptProvider::new(&database_url, false, block_provider.clone()).await?);
+
+	let client = Client::new(api, rpc_client, rpc, block_provider, receipt_provider).await?;
+	client.subscribe_and_cache_receipts(oldest_block).await?;
+
+	Ok(())
+}
diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs
index 230f2f8b7ef..5e1341e2a29 100644
--- a/substrate/frame/revive/rpc/src/lib.rs
+++ b/substrate/frame/revive/rpc/src/lib.rs
@@ -24,6 +24,7 @@ use jsonrpsee::{
 	types::{ErrorCode, ErrorObjectOwned},
 };
 use pallet_revive::evm::*;
+use sp_arithmetic::Permill;
 use sp_core::{keccak_256, H160, H256, U256};
 use thiserror::Error;
 
@@ -35,6 +36,12 @@ pub mod subxt_client;
 #[cfg(test)]
 mod tests;
 
+mod block_info_provider;
+pub use block_info_provider::*;
+
+mod receipt_provider;
+pub use receipt_provider::*;
+
 mod rpc_health;
 pub use rpc_health::*;
 
@@ -121,7 +128,12 @@ impl EthRpcServer for EthRpcServerImpl {
 		transaction_hash: H256,
 	) -> RpcResult<Option<ReceiptInfo>> {
 		let receipt = self.client.receipt(&transaction_hash).await;
-		log::debug!(target: LOG_TARGET, "transaction_receipt for {transaction_hash:?}: {}", receipt.is_some());
+		log::debug!(
+			target: LOG_TARGET,
+			"transaction_receipt for {transaction_hash:?}: received: {received} - success: {success:?}",
+			received = receipt.is_some(),
+			success = receipt.as_ref().map(|r| r.status == Some(U256::one()))
+		);
 		Ok(receipt)
 	}
 
@@ -197,12 +209,12 @@ impl EthRpcServer for EthRpcServerImpl {
 	async fn get_block_by_hash(
 		&self,
 		block_hash: H256,
-		_hydrated_transactions: bool,
+		hydrated_transactions: bool,
 	) -> RpcResult<Option<Block>> {
 		let Some(block) = self.client.block_by_hash(&block_hash).await? else {
 			return Ok(None);
 		};
-		let block = self.client.evm_block(block).await?;
+		let block = self.client.evm_block(block, hydrated_transactions).await?;
 		Ok(Some(block))
 	}
 
@@ -220,6 +232,11 @@ impl EthRpcServer for EthRpcServerImpl {
 		Ok(U256::from(GAS_PRICE))
 	}
 
+	async fn max_priority_fee_per_gas(&self) -> RpcResult<U256> {
+		// TODO: Provide better estimation
+		Ok(U256::from(Permill::from_percent(20).mul_ceil(GAS_PRICE)))
+	}
+
 	async fn get_code(&self, address: H160, block: BlockNumberOrTagOrHash) -> RpcResult<Bytes> {
 		let code = self.client.get_contract_code(&address, block).await?;
 		Ok(code.into())
@@ -232,12 +249,12 @@ impl EthRpcServer for EthRpcServerImpl {
 	async fn get_block_by_number(
 		&self,
 		block: BlockNumberOrTag,
-		_hydrated_transactions: bool,
+		hydrated_transactions: bool,
 	) -> RpcResult<Option<Block>> {
 		let Some(block) = self.client.block_by_number_or_tag(&block).await? else {
 			return Ok(None);
 		};
-		let block = self.client.evm_block(block).await?;
+		let block = self.client.evm_block(block, hydrated_transactions).await?;
 		Ok(Some(block))
 	}
 
diff --git a/substrate/frame/revive/rpc/src/receipt_provider.rs b/substrate/frame/revive/rpc/src/receipt_provider.rs
new file mode 100644
index 00000000000..5c102b3d3d4
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/receipt_provider.rs
@@ -0,0 +1,240 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{
+	client::SubstrateBlock,
+	subxt_client::{
+		revive::{calls::types::EthTransact, events::ContractEmitted},
+		system::events::ExtrinsicSuccess,
+		transaction_payment::events::TransactionFeePaid,
+		SrcChainConfig,
+	},
+	ClientError, LOG_TARGET,
+};
+use futures::{stream, StreamExt};
+use jsonrpsee::core::async_trait;
+use pallet_revive::{
+	create1,
+	evm::{GenericTransaction, Log, ReceiptInfo, TransactionSigned, H256, U256},
+};
+use sp_core::keccak_256;
+use tokio::join;
+
+mod cache;
+pub use cache::CacheReceiptProvider;
+
+mod db;
+pub use db::DBReceiptProvider;
+
+/// Provide means to store and retrieve receipts.
+#[async_trait]
+pub trait ReceiptProvider: Send + Sync {
+	/// Insert receipts into the provider.
+	async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]);
+
+	/// Remove receipts with the given block hash.
+	async fn remove(&self, block_hash: &H256);
+
+	/// Get the receipt for the given block hash and transaction index.
+	async fn receipt_by_block_hash_and_index(
+		&self,
+		block_hash: &H256,
+		transaction_index: &U256,
+	) -> Option<ReceiptInfo>;
+
+	/// Get the number of receipts per block.
+	async fn receipts_count_per_block(&self, block_hash: &H256) -> Option<usize>;
+
+	/// Get the receipt for the given transaction hash.
+	async fn receipt_by_hash(&self, transaction_hash: &H256) -> Option<ReceiptInfo>;
+
+	/// Get the signed transaction for the given transaction hash.
+	async fn signed_tx_by_hash(&self, transaction_hash: &H256) -> Option<TransactionSigned>;
+}
+
+#[async_trait]
+impl<Main: ReceiptProvider, Fallback: ReceiptProvider> ReceiptProvider for (Main, Fallback) {
+	async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) {
+		join!(self.0.insert(block_hash, receipts), self.1.insert(block_hash, receipts));
+	}
+
+	async fn remove(&self, block_hash: &H256) {
+		join!(self.0.remove(block_hash), self.1.remove(block_hash));
+	}
+
+	async fn receipt_by_block_hash_and_index(
+		&self,
+		block_hash: &H256,
+		transaction_index: &U256,
+	) -> Option<ReceiptInfo> {
+		if let Some(receipt) =
+			self.0.receipt_by_block_hash_and_index(block_hash, transaction_index).await
+		{
+			return Some(receipt);
+		}
+
+		self.1.receipt_by_block_hash_and_index(block_hash, transaction_index).await
+	}
+
+	async fn receipts_count_per_block(&self, block_hash: &H256) -> Option<usize> {
+		if let Some(count) = self.0.receipts_count_per_block(block_hash).await {
+			return Some(count);
+		}
+		self.1.receipts_count_per_block(block_hash).await
+	}
+
+	async fn receipt_by_hash(&self, hash: &H256) -> Option<ReceiptInfo> {
+		if let Some(receipt) = self.0.receipt_by_hash(hash).await {
+			return Some(receipt);
+		}
+		self.1.receipt_by_hash(hash).await
+	}
+
+	async fn signed_tx_by_hash(&self, hash: &H256) -> Option<TransactionSigned> {
+		if let Some(tx) = self.0.signed_tx_by_hash(hash).await {
+			return Some(tx);
+		}
+		self.1.signed_tx_by_hash(hash).await
+	}
+}
+
+/// Extract a [`TransactionSigned`] and a [`ReceiptInfo`] from an extrinsic.
+pub async fn extract_receipt_from_extrinsic(
+	block: &SubstrateBlock,
+	ext: subxt::blocks::ExtrinsicDetails<SrcChainConfig, subxt::OnlineClient<SrcChainConfig>>,
+	call: EthTransact,
+) -> Result<(TransactionSigned, ReceiptInfo), ClientError> {
+	let transaction_index = ext.index();
+	let block_number = U256::from(block.number());
+	let block_hash = block.hash();
+	let events = ext.events().await?;
+
+	let success = events.has::<ExtrinsicSuccess>().inspect_err(|err| {
+		log::debug!(target: LOG_TARGET, "Failed to lookup for ExtrinsicSuccess event in block {block_number}: {err:?}")
+	})?;
+	let tx_fees = events
+		.find_first::<TransactionFeePaid>()?
+		.ok_or(ClientError::TxFeeNotFound)
+		.inspect_err(
+			|err| log::debug!(target: LOG_TARGET, "TransactionFeePaid not found in events for block {block_number}\n{err:?}")
+		)?;
+	let transaction_hash = H256(keccak_256(&call.payload));
+
+	let signed_tx =
+		TransactionSigned::decode(&call.payload).map_err(|_| ClientError::TxDecodingFailed)?;
+	let from = signed_tx.recover_eth_address().map_err(|_| {
+		log::error!(target: LOG_TARGET, "Failed to recover eth address from signed tx");
+		ClientError::RecoverEthAddressFailed
+	})?;
+
+	let tx_info = GenericTransaction::from_signed(signed_tx.clone(), Some(from));
+	let gas_price = tx_info.gas_price.unwrap_or_default();
+	let gas_used = (tx_fees.tip.saturating_add(tx_fees.actual_fee))
+		.checked_div(gas_price.as_u128())
+		.unwrap_or_default();
+
+	// get logs from ContractEmitted event
+	let logs = events
+		.iter()
+		.filter_map(|event_details| {
+			let event_details = event_details.ok()?;
+			let event = event_details.as_event::<ContractEmitted>().ok()??;
+
+			Some(Log {
+				address: event.contract,
+				topics: event.topics,
+				data: Some(event.data.into()),
+				block_number: Some(block_number),
+				transaction_hash,
+				transaction_index: Some(transaction_index.into()),
+				block_hash: Some(block_hash),
+				log_index: Some(event_details.index().into()),
+				..Default::default()
+			})
+		})
+		.collect();
+
+	let contract_address = if tx_info.to.is_none() {
+		Some(create1(
+			&from,
+			tx_info
+				.nonce
+				.unwrap_or_default()
+				.try_into()
+				.map_err(|_| ClientError::ConversionFailed)?,
+		))
+	} else {
+		None
+	};
+
+	log::debug!(target: LOG_TARGET, "Adding receipt for tx hash: {transaction_hash:?} - block: {block_number:?}");
+	let receipt = ReceiptInfo::new(
+		block_hash,
+		block_number,
+		contract_address,
+		from,
+		logs,
+		tx_info.to,
+		gas_price,
+		gas_used.into(),
+		success,
+		transaction_hash,
+		transaction_index.into(),
+		tx_info.r#type.unwrap_or_default(),
+	);
+	Ok((signed_tx, receipt))
+}
+
+/// Extract receipts from a block.
+pub async fn extract_receipts_from_block(
+	block: &SubstrateBlock,
+) -> Result<Vec<(TransactionSigned, ReceiptInfo)>, ClientError> {
+	// Filter extrinsics from pallet_revive
+	let extrinsics = block.extrinsics().await.inspect_err(|err| {
+		log::debug!(target: LOG_TARGET, "Error fetching for #{:?} extrinsics: {err:?}", block.number());
+	})?;
+
+	let extrinsics = extrinsics.iter().flat_map(|ext| {
+		let call = ext.as_extrinsic::<EthTransact>().ok()??;
+		Some((ext, call))
+	});
+
+	stream::iter(extrinsics)
+		.map(|(ext, call)| async move { extract_receipt_from_extrinsic(block, ext, call).await })
+		.buffer_unordered(10)
+		.collect::<Vec<Result<_, _>>>()
+		.await
+		.into_iter()
+		.collect::<Result<Vec<_>, _>>()
+}
+
+/// Extract the signed transaction and receipt for the transaction at the given index in a block.
+pub async fn extract_receipts_from_transaction(
+	block: &SubstrateBlock,
+	transaction_index: usize,
+) -> Result<(TransactionSigned, ReceiptInfo), ClientError> {
+	let extrinsics = block.extrinsics().await?;
+	let ext = extrinsics
+		.iter()
+		.nth(transaction_index)
+		.ok_or(ClientError::EthExtrinsicNotFound)?;
+
+	let call = ext
+		.as_extrinsic::<EthTransact>()?
+		.ok_or_else(|| ClientError::EthExtrinsicNotFound)?;
+	extract_receipt_from_extrinsic(block, ext, call).await
+}
diff --git a/substrate/frame/revive/rpc/src/receipt_provider/cache.rs b/substrate/frame/revive/rpc/src/receipt_provider/cache.rs
new file mode 100644
index 00000000000..39124929ec0
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/receipt_provider/cache.rs
@@ -0,0 +1,148 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use super::ReceiptProvider;
+use jsonrpsee::core::async_trait;
+use pallet_revive::evm::{ReceiptInfo, TransactionSigned, H256, U256};
+use std::{collections::HashMap, sync::Arc};
+use tokio::sync::RwLock;
+
+/// A [`ReceiptProvider`] that caches receipts in memory.
+#[derive(Clone, Default)]
+pub struct CacheReceiptProvider {
+	cache: Arc<RwLock<ReceiptCache>>,
+}
+
+impl CacheReceiptProvider {
+	/// Get a read access on the shared cache.
+	async fn cache(&self) -> tokio::sync::RwLockReadGuard<'_, ReceiptCache> {
+		self.cache.read().await
+	}
+}
+
+#[async_trait]
+impl ReceiptProvider for CacheReceiptProvider {
+	async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) {
+		let mut cache = self.cache.write().await;
+		cache.insert(block_hash, receipts);
+	}
+
+	async fn remove(&self, block_hash: &H256) {
+		let mut cache = self.cache.write().await;
+		cache.remove(block_hash);
+	}
+
+	async fn receipt_by_block_hash_and_index(
+		&self,
+		block_hash: &H256,
+		transaction_index: &U256,
+	) -> Option<ReceiptInfo> {
+		let cache = self.cache().await;
+		let receipt_hash = cache
+			.transaction_hashes_by_block_and_index
+			.get(block_hash)?
+			.get(transaction_index)?;
+		let receipt = cache.receipts_by_hash.get(receipt_hash)?;
+		Some(receipt.clone())
+	}
+
+	async fn receipts_count_per_block(&self, block_hash: &H256) -> Option<usize> {
+		let cache = self.cache().await;
+		cache.transaction_hashes_by_block_and_index.get(block_hash).map(|v| v.len())
+	}
+
+	async fn receipt_by_hash(&self, hash: &H256) -> Option<ReceiptInfo> {
+		let cache = self.cache().await;
+		cache.receipts_by_hash.get(hash).cloned()
+	}
+
+	async fn signed_tx_by_hash(&self, hash: &H256) -> Option<TransactionSigned> {
+		let cache = self.cache().await;
+		cache.signed_tx_by_hash.get(hash).cloned()
+	}
+}
+
+#[derive(Default)]
+struct ReceiptCache {
+	/// A map of receipts by transaction hash.
+	receipts_by_hash: HashMap<H256, ReceiptInfo>,
+
+	/// A map of Signed transaction by transaction hash.
+	signed_tx_by_hash: HashMap<H256, TransactionSigned>,
+
+	/// A map of receipt hashes by block hash.
+	transaction_hashes_by_block_and_index: HashMap<H256, HashMap<U256, H256>>,
+}
+
+impl ReceiptCache {
+	/// Insert new receipts into the cache.
+	pub fn insert(&mut self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) {
+		if !receipts.is_empty() {
+			let values = receipts
+				.iter()
+				.map(|(_, receipt)| (receipt.transaction_index, receipt.transaction_hash))
+				.collect::<HashMap<_, _>>();
+
+			self.transaction_hashes_by_block_and_index.insert(*block_hash, values);
+
+			self.receipts_by_hash.extend(
+				receipts.iter().map(|(_, receipt)| (receipt.transaction_hash, receipt.clone())),
+			);
+
+			self.signed_tx_by_hash.extend(
+				receipts
+					.iter()
+					.map(|(signed_tx, receipt)| (receipt.transaction_hash, signed_tx.clone())),
+			)
+		}
+	}
+
+	/// Remove entry from the cache.
+	pub fn remove(&mut self, hash: &H256) {
+		if let Some(entries) = self.transaction_hashes_by_block_and_index.remove(hash) {
+			for hash in entries.values() {
+				self.receipts_by_hash.remove(hash);
+				self.signed_tx_by_hash.remove(hash);
+			}
+		}
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[test]
+	fn cache_insert_and_remove_works() {
+		let mut cache = ReceiptCache::default();
+
+		for i in 1u8..=3 {
+			let hash = H256::from([i; 32]);
+			cache.insert(
+				&hash,
+				&[(
+					TransactionSigned::default(),
+					ReceiptInfo { transaction_hash: hash, ..Default::default() },
+				)],
+			);
+		}
+
+		cache.remove(&H256::from([1u8; 32]));
+		assert_eq!(cache.transaction_hashes_by_block_and_index.len(), 2);
+		assert_eq!(cache.receipts_by_hash.len(), 2);
+		assert_eq!(cache.signed_tx_by_hash.len(), 2);
+	}
+}
diff --git a/substrate/frame/revive/rpc/src/receipt_provider/db.rs b/substrate/frame/revive/rpc/src/receipt_provider/db.rs
new file mode 100644
index 00000000000..63917d6193e
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/receipt_provider/db.rs
@@ -0,0 +1,216 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::*;
+use crate::BlockInfoProvider;
+use jsonrpsee::core::async_trait;
+use pallet_revive::evm::{ReceiptInfo, TransactionSigned};
+use sp_core::{H256, U256};
+use sqlx::{query, SqlitePool};
+use std::sync::Arc;
+
+/// A [`ReceiptProvider`] that stores receipts in a SQLite database.
+#[derive(Clone)]
+pub struct DBReceiptProvider {
+	/// The database pool.
+	pool: SqlitePool,
+	/// The block provider used to fetch blocks, and reconstruct receipts.
+	block_provider: Arc<dyn BlockInfoProvider>,
+	/// Whether the provider is read-only; when `true`, `insert` calls are skipped.
+	read_only: bool,
+}
+
+impl DBReceiptProvider {
+	/// Create a new `DBReceiptProvider` with the given database URL and block provider.
+	pub async fn new(
+		database_url: &str,
+		read_only: bool,
+		block_provider: Arc<dyn BlockInfoProvider>,
+	) -> Result<Self, sqlx::Error> {
+		let pool = SqlitePool::connect(database_url).await?;
+		Ok(Self { pool, block_provider, read_only })
+	}
+
+	async fn fetch_row(&self, transaction_hash: &H256) -> Option<(H256, usize)> {
+		let transaction_hash = hex::encode(transaction_hash);
+		let result = query!(
+			r#"
+			SELECT block_hash, transaction_index
+			FROM transaction_hashes
+			WHERE transaction_hash = $1
+			"#,
+			transaction_hash
+		)
+		.fetch_optional(&self.pool)
+		.await
+		.ok()??;
+
+		let block_hash = result.block_hash.parse::<H256>().ok()?;
+		let transaction_index = result.transaction_index.try_into().ok()?;
+		Some((block_hash, transaction_index))
+	}
+}
+
+#[async_trait]
+impl ReceiptProvider for DBReceiptProvider {
+	async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) {
+		if self.read_only {
+			return
+		}
+
+		let block_hash_str = hex::encode(block_hash);
+		for (_, receipt) in receipts {
+			let transaction_hash = hex::encode(receipt.transaction_hash);
+			let transaction_index = receipt.transaction_index.as_u32() as i32;
+
+			let result = query!(
+				r#"
+				INSERT INTO transaction_hashes (transaction_hash, block_hash, transaction_index)
+				VALUES ($1, $2, $3)
+
+				ON CONFLICT(transaction_hash) DO UPDATE SET
+				block_hash = EXCLUDED.block_hash,
+				transaction_index = EXCLUDED.transaction_index
+				"#,
+				transaction_hash,
+				block_hash_str,
+				transaction_index
+			)
+			.execute(&self.pool)
+			.await;
+
+			if let Err(err) = result {
+				log::error!(
+					"Error inserting transaction for block hash {block_hash:?}:  {:?}",
+					err
+				);
+			}
+		}
+	}
+
+	async fn remove(&self, _block_hash: &H256) {}
+
+	async fn receipts_count_per_block(&self, block_hash: &H256) -> Option<usize> {
+		let block_hash = hex::encode(block_hash);
+		let row = query!(
+			r#"
+            SELECT COUNT(*) as count
+            FROM transaction_hashes
+            WHERE block_hash = $1
+            "#,
+			block_hash
+		)
+		.fetch_one(&self.pool)
+		.await
+		.ok()?;
+
+		let count = row.count as usize;
+		Some(count)
+	}
+
+	async fn receipt_by_block_hash_and_index(
+		&self,
+		block_hash: &H256,
+		transaction_index: &U256,
+	) -> Option<ReceiptInfo> {
+		let block = self.block_provider.block_by_hash(block_hash).await.ok()??;
+		let transaction_index: usize = transaction_index.as_usize(); // TODO: check for overflow
+		let (_, receipt) =
+			extract_receipts_from_transaction(&block, transaction_index).await.ok()?;
+		Some(receipt)
+	}
+
+	async fn receipt_by_hash(&self, transaction_hash: &H256) -> Option<ReceiptInfo> {
+		let (block_hash, transaction_index) = self.fetch_row(transaction_hash).await?;
+
+		let block = self.block_provider.block_by_hash(&block_hash).await.ok()??;
+		let (_, receipt) =
+			extract_receipts_from_transaction(&block, transaction_index).await.ok()?;
+		Some(receipt)
+	}
+
+	async fn signed_tx_by_hash(&self, transaction_hash: &H256) -> Option<TransactionSigned> {
+		let transaction_hash = hex::encode(transaction_hash);
+		let result = query!(
+			r#"
+			SELECT block_hash, transaction_index
+			FROM transaction_hashes
+			WHERE transaction_hash = $1
+			"#,
+			transaction_hash
+		)
+		.fetch_optional(&self.pool)
+		.await
+		.ok()??;
+
+		let block_hash = result.block_hash.parse::<H256>().ok()?;
+		let transaction_index = result.transaction_index.try_into().ok()?;
+
+		let block = self.block_provider.block_by_hash(&block_hash).await.ok()??;
+		let (signed_tx, _) =
+			extract_receipts_from_transaction(&block, transaction_index).await.ok()?;
+		Some(signed_tx)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::test::MockBlockInfoProvider;
+	use pallet_revive::evm::{ReceiptInfo, TransactionSigned};
+	use sp_core::H256;
+	use sqlx::SqlitePool;
+
+	async fn setup_sqlite_provider(pool: SqlitePool) -> DBReceiptProvider {
+		DBReceiptProvider {
+			pool,
+			block_provider: Arc::new(MockBlockInfoProvider {}),
+			read_only: false,
+		}
+	}
+
+	#[sqlx::test]
+	async fn test_insert(pool: SqlitePool) {
+		let provider = setup_sqlite_provider(pool).await;
+		let block_hash = H256::default();
+		let receipts = vec![(TransactionSigned::default(), ReceiptInfo::default())];
+
+		provider.insert(&block_hash, &receipts).await;
+		let row = provider.fetch_row(&receipts[0].1.transaction_hash).await;
+		assert_eq!(row, Some((block_hash, 0)));
+	}
+
+	#[sqlx::test]
+	async fn test_receipts_count_per_block(pool: SqlitePool) {
+		let provider = setup_sqlite_provider(pool).await;
+		let block_hash = H256::default();
+		let receipts = vec![
+			(
+				TransactionSigned::default(),
+				ReceiptInfo { transaction_hash: H256::from([0u8; 32]), ..Default::default() },
+			),
+			(
+				TransactionSigned::default(),
+				ReceiptInfo { transaction_hash: H256::from([1u8; 32]), ..Default::default() },
+			),
+		];
+
+		provider.insert(&block_hash, &receipts).await;
+		let count = provider.receipts_count_per_block(&block_hash).await;
+		assert_eq!(count, Some(2));
+	}
+}
diff --git a/substrate/frame/revive/rpc/src/rpc_health.rs b/substrate/frame/revive/rpc/src/rpc_health.rs
index f94d4b82a80..35c5a588f28 100644
--- a/substrate/frame/revive/rpc/src/rpc_health.rs
+++ b/substrate/frame/revive/rpc/src/rpc_health.rs
@@ -25,6 +25,10 @@ pub trait SystemHealthRpc {
 	/// Proxy the substrate chain system_health RPC call.
 	#[method(name = "system_health")]
 	async fn system_health(&self) -> RpcResult<Health>;
+
+	/// Returns the number of peers currently connected to the client.
+	#[method(name = "net_peerCount")]
+	async fn net_peer_count(&self) -> RpcResult<U64>;
 }
 
 pub struct SystemHealthRpcServerImpl {
@@ -47,4 +51,9 @@ impl SystemHealthRpcServer for SystemHealthRpcServerImpl {
 			should_have_peers: health.should_have_peers,
 		})
 	}
+
+	async fn net_peer_count(&self) -> RpcResult<U64> {
+		let health = self.client.system_health().await?;
+		Ok((health.peers as u64).into())
+	}
 }
diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs
index ad34dbfdfb4..da60360d9e6 100644
--- a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs
+++ b/substrate/frame/revive/rpc/src/rpc_methods_gen.rs
@@ -142,6 +142,10 @@ pub trait EthRpc {
 		transaction_hash: H256,
 	) -> RpcResult<Option<ReceiptInfo>>;
 
+	/// Returns the current maxPriorityFeePerGas per gas in wei.
+	#[method(name = "eth_maxPriorityFeePerGas")]
+	async fn max_priority_fee_per_gas(&self) -> RpcResult<U256>;
+
 	/// Submits a raw transaction. For EIP-4844 transactions, the raw form must be the network form.
 	/// This means it includes the blobs, KZG commitments, and KZG proofs.
 	#[method(name = "eth_sendRawTransaction")]
diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs
index ed046cb4da4..b4b2c6ffcf1 100644
--- a/substrate/frame/revive/src/evm/api/rpc_types.rs
+++ b/substrate/frame/revive/src/evm/api/rpc_types.rs
@@ -192,7 +192,11 @@ impl GenericTransaction {
 				value: Some(tx.value),
 				to: Some(tx.to),
 				gas: Some(tx.gas),
-				gas_price: Some(tx.max_fee_per_blob_gas),
+				gas_price: Some(
+					U256::from(crate::GAS_PRICE)
+						.saturating_add(tx.max_priority_fee_per_gas)
+						.max(tx.max_fee_per_blob_gas),
+				),
 				access_list: Some(tx.access_list),
 				blob_versioned_hashes: tx.blob_versioned_hashes,
 				max_fee_per_blob_gas: Some(tx.max_fee_per_blob_gas),
@@ -209,7 +213,11 @@ impl GenericTransaction {
 				value: Some(tx.value),
 				to: tx.to,
 				gas: Some(tx.gas),
-				gas_price: Some(tx.gas_price),
+				gas_price: Some(
+					U256::from(crate::GAS_PRICE)
+						.saturating_add(tx.max_priority_fee_per_gas)
+						.max(tx.max_fee_per_gas),
+				),
 				access_list: Some(tx.access_list),
 				max_fee_per_gas: Some(tx.max_fee_per_gas),
 				max_priority_fee_per_gas: Some(tx.max_priority_fee_per_gas),
diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
index 1d65fdefdde..5d31613ca31 100644
--- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
+++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs
@@ -87,7 +87,7 @@ pub struct Block {
 	/// Total difficulty
 	#[serde(rename = "totalDifficulty", skip_serializing_if = "Option::is_none")]
 	pub total_difficulty: Option<U256>,
-	pub transactions: H256OrTransactionInfo,
+	pub transactions: HashesOrTransactionInfos,
 	/// Transactions root
 	#[serde(rename = "transactionsRoot")]
 	pub transactions_root: H256,
@@ -357,15 +357,15 @@ pub enum BlockTag {
 	Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq,
 )]
 #[serde(untagged)]
-pub enum H256OrTransactionInfo {
+pub enum HashesOrTransactionInfos {
 	/// Transaction hashes
-	H256s(Vec<H256>),
+	Hashes(Vec<H256>),
 	/// Full transactions
 	TransactionInfos(Vec<TransactionInfo>),
 }
-impl Default for H256OrTransactionInfo {
+impl Default for HashesOrTransactionInfos {
 	fn default() -> Self {
-		H256OrTransactionInfo::H256s(Default::default())
+		HashesOrTransactionInfos::Hashes(Default::default())
 	}
 }
 
diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs
index b24de61314f..3bd4bde5679 100644
--- a/substrate/frame/revive/src/wasm/mod.rs
+++ b/substrate/frame/revive/src/wasm/mod.rs
@@ -193,8 +193,9 @@ where
 						&HoldReason::CodeUploadDepositReserve.into(),
 						&self.code_info.owner,
 						deposit,
-					) .map_err(|err| { log::debug!(target: LOG_TARGET, "failed to store code for owner: {:?}: {err:?}", self.code_info.owner);
-						<Error<T>>::StorageDepositNotEnoughFunds
+					) .map_err(|err| {
+							log::debug!(target: LOG_TARGET, "failed to hold store code deposit {deposit:?} for owner: {:?}: {err:?}", self.code_info.owner);
+							<Error<T>>::StorageDepositNotEnoughFunds
 					})?;
 					}
 
-- 
GitLab


From 6878ba1f399b628cf456ad3abfe72f2553422e1f Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Tue, 14 Jan 2025 16:52:49 +0200
Subject: [PATCH 050/116] Retry approval on availability failure if the check
 is still needed (#6807)

Recovering the POV can fail in situation where the node just restart and
the DHT topology wasn't fully discovered yet, so the current node can't
connect to most of its Peers. This is bad because for gossiping the
assignment you need to be connected to just a few peers, so because we
can't approve the candidate and other nodes will see this as a no show.

This becomes bad in the scenario where you've got a lot of nodes
restarting at the same time, so you end up having a lot of no-shows in
the network that are never covered, in that case it makes sense for
nodes to actually retry approving the candidate at a later date in time
and retry several times if the block containing the candidate wasn't
approved.

## TODO
- [x] Add a subsystem test.

---------

Signed-off-by: Alexandru Gheorghe <alexandru.gheorghe@parity.io>
---
 polkadot/node/core/approval-voting/src/lib.rs | 137 +++++++++++++++-
 .../node/core/approval-voting/src/tests.rs    | 146 ++++++++++++++++++
 .../subsystem-bench/src/lib/approval/mod.rs   |   2 +
 prdoc/pr_6807.prdoc                           |  19 +++
 4 files changed, 297 insertions(+), 7 deletions(-)
 create mode 100644 prdoc/pr_6807.prdoc

diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index 7cea22d1a6a..27361df3731 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -21,6 +21,7 @@
 //! of others. It uses this information to determine when candidates and blocks have
 //! been sufficiently approved to finalize.
 
+use futures_timer::Delay;
 use polkadot_node_primitives::{
 	approval::{
 		v1::{BlockApprovalMeta, DelayTranche},
@@ -122,6 +123,9 @@ const APPROVAL_CHECKING_TIMEOUT: Duration = Duration::from_secs(120);
 const WAIT_FOR_SIGS_TIMEOUT: Duration = Duration::from_millis(500);
 const APPROVAL_CACHE_SIZE: u32 = 1024;
 
+/// The maximum number of times we retry to approve a block if it is still needed.
+const MAX_APPROVAL_RETRIES: u32 = 16;
+
 const APPROVAL_DELAY: Tick = 2;
 pub(crate) const LOG_TARGET: &str = "parachain::approval-voting";
 
@@ -165,6 +169,10 @@ pub struct ApprovalVotingSubsystem {
 	metrics: Metrics,
 	clock: Arc<dyn Clock + Send + Sync>,
 	spawner: Arc<dyn overseer::gen::Spawner + 'static>,
+	/// The maximum number of times we retry to approve a block if it is still needed and PoV fetch failed.
+	max_approval_retries: u32,
+	/// The backoff before we retry the approval.
+	retry_backoff: Duration,
 }
 
 #[derive(Clone)]
@@ -493,6 +501,8 @@ impl ApprovalVotingSubsystem {
 			metrics,
 			Arc::new(SystemClock {}),
 			spawner,
+			MAX_APPROVAL_RETRIES,
+			APPROVAL_CHECKING_TIMEOUT / 2,
 		)
 	}
 
@@ -505,6 +515,8 @@ impl ApprovalVotingSubsystem {
 		metrics: Metrics,
 		clock: Arc<dyn Clock + Send + Sync>,
 		spawner: Arc<dyn overseer::gen::Spawner + 'static>,
+		max_approval_retries: u32,
+		retry_backoff: Duration,
 	) -> Self {
 		ApprovalVotingSubsystem {
 			keystore,
@@ -515,6 +527,8 @@ impl ApprovalVotingSubsystem {
 			metrics,
 			clock,
 			spawner,
+			max_approval_retries,
+			retry_backoff,
 		}
 	}
 
@@ -706,18 +720,53 @@ enum ApprovalOutcome {
 	TimedOut,
 }
 
+#[derive(Clone)]
+struct RetryApprovalInfo {
+	candidate: CandidateReceipt,
+	backing_group: GroupIndex,
+	executor_params: ExecutorParams,
+	core_index: Option<CoreIndex>,
+	session_index: SessionIndex,
+	attempts_remaining: u32,
+	backoff: Duration,
+}
+
 struct ApprovalState {
 	validator_index: ValidatorIndex,
 	candidate_hash: CandidateHash,
 	approval_outcome: ApprovalOutcome,
+	retry_info: Option<RetryApprovalInfo>,
 }
 
 impl ApprovalState {
 	fn approved(validator_index: ValidatorIndex, candidate_hash: CandidateHash) -> Self {
-		Self { validator_index, candidate_hash, approval_outcome: ApprovalOutcome::Approved }
+		Self {
+			validator_index,
+			candidate_hash,
+			approval_outcome: ApprovalOutcome::Approved,
+			retry_info: None,
+		}
 	}
 	fn failed(validator_index: ValidatorIndex, candidate_hash: CandidateHash) -> Self {
-		Self { validator_index, candidate_hash, approval_outcome: ApprovalOutcome::Failed }
+		Self {
+			validator_index,
+			candidate_hash,
+			approval_outcome: ApprovalOutcome::Failed,
+			retry_info: None,
+		}
+	}
+
+	fn failed_with_retry(
+		validator_index: ValidatorIndex,
+		candidate_hash: CandidateHash,
+		retry_info: Option<RetryApprovalInfo>,
+	) -> Self {
+		Self {
+			validator_index,
+			candidate_hash,
+			approval_outcome: ApprovalOutcome::Failed,
+			retry_info,
+		}
 	}
 }
 
@@ -757,6 +806,7 @@ impl CurrentlyCheckingSet {
 							candidate_hash,
 							validator_index,
 							approval_outcome: ApprovalOutcome::TimedOut,
+							retry_info: None,
 						},
 						Some(approval_state) => approval_state,
 					}
@@ -1271,18 +1321,19 @@ where
 						validator_index,
 						candidate_hash,
 						approval_outcome,
+						retry_info,
 					}
 				) = approval_state;
 
 				if matches!(approval_outcome, ApprovalOutcome::Approved) {
 					let mut approvals: Vec<Action> = relay_block_hashes
-						.into_iter()
+						.iter()
 						.map(|block_hash|
 							Action::IssueApproval(
 								candidate_hash,
 								ApprovalVoteRequest {
 									validator_index,
-									block_hash,
+									block_hash: *block_hash,
 								},
 							)
 						)
@@ -1290,6 +1341,43 @@ where
 					actions.append(&mut approvals);
 				}
 
+				if let Some(retry_info) = retry_info {
+					for block_hash in relay_block_hashes {
+						if overlayed_db.load_block_entry(&block_hash).map(|block_info| block_info.is_some()).unwrap_or(false) {
+							let sender = to_other_subsystems.clone();
+							let spawn_handle = subsystem.spawner.clone();
+							let metrics = subsystem.metrics.clone();
+							let retry_info = retry_info.clone();
+							let executor_params = retry_info.executor_params.clone();
+							let candidate = retry_info.candidate.clone();
+
+							currently_checking_set
+								.insert_relay_block_hash(
+									candidate_hash,
+									validator_index,
+									block_hash,
+									async move {
+										launch_approval(
+											sender,
+											spawn_handle,
+											metrics,
+											retry_info.session_index,
+											candidate,
+											validator_index,
+											block_hash,
+											retry_info.backing_group,
+											executor_params,
+											retry_info.core_index,
+											retry_info,
+										)
+										.await
+									},
+								)
+								.await?;
+						}
+					}
+				}
+
 				actions
 			},
 			(block_hash, validator_index) = delayed_approvals_timers.select_next_some() => {
@@ -1340,6 +1428,8 @@ where
 			&mut approvals_cache,
 			&mut subsystem.mode,
 			actions,
+			subsystem.max_approval_retries,
+			subsystem.retry_backoff,
 		)
 		.await?
 		{
@@ -1389,6 +1479,8 @@ pub async fn start_approval_worker<
 		metrics,
 		clock,
 		spawner,
+		MAX_APPROVAL_RETRIES,
+		APPROVAL_CHECKING_TIMEOUT / 2,
 	);
 	let backend = DbBackend::new(db.clone(), approval_voting.db_config);
 	let spawner = approval_voting.spawner.clone();
@@ -1456,6 +1548,8 @@ async fn handle_actions<
 	approvals_cache: &mut LruMap<CandidateHash, ApprovalOutcome>,
 	mode: &mut Mode,
 	actions: Vec<Action>,
+	max_approval_retries: u32,
+	retry_backoff: Duration,
 ) -> SubsystemResult<bool> {
 	let mut conclude = false;
 	let mut actions_iter = actions.into_iter();
@@ -1542,6 +1636,16 @@ async fn handle_actions<
 						let sender = sender.clone();
 						let spawn_handle = spawn_handle.clone();
 
+						let retry = RetryApprovalInfo {
+							candidate: candidate.clone(),
+							backing_group,
+							executor_params: executor_params.clone(),
+							core_index,
+							session_index: session,
+							attempts_remaining: max_approval_retries,
+							backoff: retry_backoff,
+						};
+
 						currently_checking_set
 							.insert_relay_block_hash(
 								candidate_hash,
@@ -1559,6 +1663,7 @@ async fn handle_actions<
 										backing_group,
 										executor_params,
 										core_index,
+										retry,
 									)
 									.await
 								},
@@ -3329,6 +3434,7 @@ async fn launch_approval<
 	backing_group: GroupIndex,
 	executor_params: ExecutorParams,
 	core_index: Option<CoreIndex>,
+	retry: RetryApprovalInfo,
 ) -> SubsystemResult<RemoteHandle<ApprovalState>> {
 	let (a_tx, a_rx) = oneshot::channel();
 	let (code_tx, code_rx) = oneshot::channel();
@@ -3360,6 +3466,7 @@ async fn launch_approval<
 
 	let candidate_hash = candidate.hash();
 	let para_id = candidate.descriptor.para_id();
+	let mut next_retry = None;
 	gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data.");
 
 	let timer = metrics.time_recover_and_approve();
@@ -3388,7 +3495,6 @@ async fn launch_approval<
 	let background = async move {
 		// Force the move of the timer into the background task.
 		let _timer = timer;
-
 		let available_data = match a_rx.await {
 			Err(_) => return ApprovalState::failed(validator_index, candidate_hash),
 			Ok(Ok(a)) => a,
@@ -3399,10 +3505,27 @@ async fn launch_approval<
 							target: LOG_TARGET,
 							?para_id,
 							?candidate_hash,
+							attempts_remaining = retry.attempts_remaining,
 							"Data unavailable for candidate {:?}",
 							(candidate_hash, candidate.descriptor.para_id()),
 						);
-						// do nothing. we'll just be a no-show and that'll cause others to rise up.
+						// Availability could fail if we did not discover much of the network, so
+						// let's back off and order the subsystem to retry at a later point if the
+						// approval is still needed, because no-show wasn't covered yet.
+						if retry.attempts_remaining > 0 {
+							Delay::new(retry.backoff).await;
+							next_retry = Some(RetryApprovalInfo {
+								candidate,
+								backing_group,
+								executor_params,
+								core_index,
+								session_index,
+								attempts_remaining: retry.attempts_remaining - 1,
+								backoff: retry.backoff,
+							});
+						} else {
+							next_retry = None;
+						}
 						metrics_guard.take().on_approval_unavailable();
 					},
 					&RecoveryError::ChannelClosed => {
@@ -3433,7 +3556,7 @@ async fn launch_approval<
 						metrics_guard.take().on_approval_invalid();
 					},
 				}
-				return ApprovalState::failed(validator_index, candidate_hash)
+				return ApprovalState::failed_with_retry(validator_index, candidate_hash, next_retry)
 			},
 		};
 
diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs
index be569a1de3e..b72993fe1a9 100644
--- a/polkadot/node/core/approval-voting/src/tests.rs
+++ b/polkadot/node/core/approval-voting/src/tests.rs
@@ -78,6 +78,9 @@ const SLOT_DURATION_MILLIS: u64 = 5000;
 
 const TIMEOUT: Duration = Duration::from_millis(2000);
 
+const NUM_APPROVAL_RETRIES: u32 = 3;
+const RETRY_BACKOFF: Duration = Duration::from_millis(300);
+
 #[derive(Clone)]
 struct TestSyncOracle {
 	flag: Arc<AtomicBool>,
@@ -573,6 +576,8 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 			Metrics::default(),
 			clock.clone(),
 			Arc::new(SpawnGlue(pool)),
+			NUM_APPROVAL_RETRIES,
+			RETRY_BACKOFF,
 		),
 		assignment_criteria,
 		backend,
@@ -3202,6 +3207,20 @@ async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) {
 	);
 }
 
+async fn recover_available_data_failure(virtual_overseer: &mut VirtualOverseer) {
+	let available_data = RecoveryError::Unavailable;
+
+	assert_matches!(
+		virtual_overseer.recv().await,
+		AllMessages::AvailabilityRecovery(
+			AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, _, tx)
+		) => {
+			tx.send(Err(available_data)).unwrap();
+		},
+		"overseer did not receive recover available data message",
+	);
+}
+
 struct TriggersAssignmentConfig<F1, F2> {
 	our_assigned_tranche: DelayTranche,
 	assign_validator_tranche: F1,
@@ -4791,6 +4810,133 @@ fn subsystem_relaunches_approval_work_on_restart() {
 	});
 }
 
+/// Test that we retry the approval of candidate on availability failure, up to max retries.
+#[test]
+fn subsystem_relaunches_approval_work_on_availability_failure() {
+	let assignment_criteria = Box::new(MockAssignmentCriteria(
+		|| {
+			let mut assignments = HashMap::new();
+
+			let _ = assignments.insert(
+				CoreIndex(0),
+				approval_db::v2::OurAssignment {
+					cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact {
+						core_bitfield: vec![CoreIndex(0), CoreIndex(2)].try_into().unwrap(),
+					}),
+					tranche: 0,
+					validator_index: ValidatorIndex(0),
+					triggered: false,
+				}
+				.into(),
+			);
+
+			let _ = assignments.insert(
+				CoreIndex(1),
+				approval_db::v2::OurAssignment {
+					cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFDelay {
+						core_index: CoreIndex(1),
+					}),
+					tranche: 0,
+					validator_index: ValidatorIndex(0),
+					triggered: false,
+				}
+				.into(),
+			);
+			assignments
+		},
+		|_| Ok(0),
+	));
+	let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build();
+	let store = config.backend();
+
+	test_harness(config, |test_harness| async move {
+		let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness;
+
+		setup_overseer_with_blocks_with_two_assignments_triggered(
+			&mut virtual_overseer,
+			store,
+			&clock,
+			sync_oracle_handle,
+		)
+		.await;
+
+		// We have two candidates for one we are going to fail the availability for up to
+		// max_retries and for the other we are going to succeed on the last retry, so we should
+		// see the approval being distributed.
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment(
+				_,
+				_,
+			)) => {
+			}
+		);
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment(
+				_,
+				_
+			)) => {
+			}
+		);
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data_failure(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		recover_available_data(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive {
+				exec_kind,
+				response_sender,
+				..
+			}) if exec_kind == PvfExecKind::Approval => {
+				response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default())))
+					.unwrap();
+			}
+		);
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => {
+				let _ = sender.send(Ok(ApprovalVotingParams {
+					max_approval_coalesce_count: 1,
+				}));
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_))
+		);
+
+		// Assert that there are no more messages being sent by the subsystem
+		assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none());
+
+		virtual_overseer
+	});
+}
+
 // Test that cached approvals, which are candidates that we approved but we didn't issue
 // the signature yet because we want to coalesce it with more candidate are sent after restart.
 #[test]
diff --git a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs
index 1b20960a3f8..5f1689cb226 100644
--- a/polkadot/node/subsystem-bench/src/lib/approval/mod.rs
+++ b/polkadot/node/subsystem-bench/src/lib/approval/mod.rs
@@ -891,6 +891,8 @@ fn build_overseer(
 			state.approval_voting_parallel_metrics.approval_voting_metrics(),
 			Arc::new(system_clock.clone()),
 			Arc::new(SpawnGlue(spawn_task_handle.clone())),
+			1,
+			Duration::from_secs(1),
 		);
 
 		let approval_distribution = ApprovalDistribution::new_with_clock(
diff --git a/prdoc/pr_6807.prdoc b/prdoc/pr_6807.prdoc
new file mode 100644
index 00000000000..b9564dfb2fe
--- /dev/null
+++ b/prdoc/pr_6807.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Retry approval on availability failure if the check is still needed
+
+doc:
+  - audience: Node Dev
+    description: |
+      Recovering the POV can fail in situations where the node just restarted and the DHT topology
+      wasn't fully discovered yet, so the current node can't connect to most of its peers.
+      This is bad because for gossiping the assignment you need to be connected to just a few
+      peers, so because we can't approve the candidate other nodes will see this as a no-show.
+      Fix it by retrying to approve a candidate for a fixed number of attempts if the block is
+      still needed.
+
+
+crates:
+  - name: polkadot-node-core-approval-voting
+    bump: minor
-- 
GitLab


From d38bb9533b70abb7eff4e8770177d7840899ca86 Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Tue, 14 Jan 2025 19:10:27 +0200
Subject: [PATCH 051/116] approval-voting: Fix sending of assignments after
 restart (#6973)

There is a problem on restart where nodes will not trigger their needed
assignment if they were offline while the time of the assignment passed.

That happens because after restart we will hit this condition
https://github.com/paritytech/polkadot-sdk/blob/4e805ca05067f6ed970f33f9be51483185b0cc0b/polkadot/node/core/approval-voting/src/lib.rs#L2495
and considered will be `tick_now` which is already higher than the tick
of our assignment.

The fix is to schedule a wakeup for untriggered assignments at restart
and let the logic of processing a wakeup decide if it needs to trigger
the assignment or not.

One thing that we need to be careful here is to make sure we don't
schedule the wake up immediately after restart because the node would
still be behind with all the assignments it should have received and
might make it wrongfully decide it needs to trigger its assignment, so I
added a `RESTART_WAKEUP_DELAY: Tick = 12` which should be more than
enough for the node to catch up.

---------

Signed-off-by: Alexandru Gheorghe <alexandru.gheorghe@parity.io>
Co-authored-by: ordian <write@reusable.software>
Co-authored-by: Andrei Eres <eresav@me.com>
---
 polkadot/node/core/approval-voting/src/lib.rs |  25 +-
 .../node/core/approval-voting/src/tests.rs    | 246 ++++++++++++++++++
 prdoc/pr_6973.prdoc                           |  16 ++
 3 files changed, 286 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_6973.prdoc

diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index 27361df3731..b4c2a6afee0 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -132,6 +132,16 @@ pub(crate) const LOG_TARGET: &str = "parachain::approval-voting";
 // The max number of ticks we delay sending the approval after we are ready to issue the approval
 const MAX_APPROVAL_COALESCE_WAIT_TICKS: Tick = 12;
 
+// If the node restarted and the tranche has passed without the assignment
+// being triggered, we won't trigger the assignment at restart because we don't have
+// a wakeup schedule for it.
+// The solution is to always schedule a wake up after the restart and let
+// process_wakeup decide if the assignment needs to be triggered.
+// We need to have a delay after restart to give time to the node to catch up with
+// messages and not trigger its assignment unnecessarily, because it hasn't seen
+// the assignments from the other validators.
+const RESTART_WAKEUP_DELAY: Tick = 12;
+
 /// Configuration for the approval voting subsystem
 #[derive(Debug, Clone)]
 pub struct Config {
@@ -1837,7 +1847,20 @@ async fn distribution_messages_for_activation<Sender: SubsystemSender<RuntimeApi
 			match candidate_entry.approval_entry(&block_hash) {
 				Some(approval_entry) => {
 					match approval_entry.local_statements() {
-						(None, None) | (None, Some(_)) => {}, // second is impossible case.
+						(None, None) =>
+							if approval_entry
+								.our_assignment()
+								.map(|assignment| !assignment.triggered())
+								.unwrap_or(false)
+							{
+								actions.push(Action::ScheduleWakeup {
+									block_hash,
+									block_number: block_entry.block_number(),
+									candidate_hash: *candidate_hash,
+									tick: state.clock.tick_now() + RESTART_WAKEUP_DELAY,
+								})
+							},
+						(None, Some(_)) => {}, // second is impossible case.
 						(Some(assignment), None) => {
 							let claimed_core_indices =
 								get_core_indices_on_startup(&assignment.cert().kind, *core_index);
diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs
index b72993fe1a9..9fe716833b8 100644
--- a/polkadot/node/core/approval-voting/src/tests.rs
+++ b/polkadot/node/core/approval-voting/src/tests.rs
@@ -5380,6 +5380,252 @@ fn subsystem_sends_assignment_approval_in_correct_order_on_approval_restart() {
 	});
 }
 
+// Test that if the subsystem missed the triggering of some tranches because it was not running
+// it launches the missed assignments on restart.
+#[test]
+fn subsystem_launches_missed_assignments_on_restart() {
+	let test_tranche = 20;
+	let assignment_criteria = Box::new(MockAssignmentCriteria(
+		move || {
+			let mut assignments = HashMap::new();
+			let _ = assignments.insert(
+				CoreIndex(0),
+				approval_db::v2::OurAssignment {
+					cert: garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFDelay {
+						core_index: CoreIndex(0),
+					}),
+					tranche: test_tranche,
+					validator_index: ValidatorIndex(0),
+					triggered: false,
+				}
+				.into(),
+			);
+
+			assignments
+		},
+		|_| Ok(0),
+	));
+	let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build();
+	let store = config.backend();
+	let store_clone = config.backend();
+
+	test_harness(config, |test_harness| async move {
+		let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness;
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => {
+				rx.send(Ok(0)).unwrap();
+			}
+		);
+
+		let block_hash = Hash::repeat_byte(0x01);
+		let fork_block_hash = Hash::repeat_byte(0x02);
+		let candidate_commitments = CandidateCommitments::default();
+		let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash);
+		candidate_receipt.commitments_hash = candidate_commitments.hash();
+		let candidate_hash = candidate_receipt.hash();
+		let slot = Slot::from(1);
+		let (chain_builder, _session_info) = build_chain_with_two_blocks_with_one_candidate_each(
+			block_hash,
+			fork_block_hash,
+			slot,
+			sync_oracle_handle,
+			candidate_receipt,
+		)
+		.await;
+		chain_builder.build(&mut virtual_overseer).await;
+
+		assert!(!clock.inner.lock().current_wakeup_is(1));
+		clock.inner.lock().wakeup_all(1);
+
+		assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot) + test_tranche as u64));
+		clock.inner.lock().wakeup_all(slot_to_tick(slot));
+
+		futures_timer::Delay::new(Duration::from_millis(200)).await;
+
+		clock.inner.lock().wakeup_all(slot_to_tick(slot + 2));
+
+		assert_eq!(clock.inner.lock().wakeups.len(), 0);
+
+		futures_timer::Delay::new(Duration::from_millis(200)).await;
+
+		let candidate_entry = store.load_candidate_entry(&candidate_hash).unwrap().unwrap();
+		let our_assignment =
+			candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap();
+		assert!(!our_assignment.triggered());
+
+		// Assignment is not triggered because its tranche has not been reached.
+		virtual_overseer
+	});
+
+	// Restart a new approval voting subsystem with the same database and major syncing true until
+	// the last leaf.
+	let config = HarnessConfigBuilder::default().backend(store_clone).major_syncing(true).build();
+
+	test_harness(config, |test_harness| async move {
+		let TestHarness { mut virtual_overseer, clock, sync_oracle_handle } = test_harness;
+		let slot = Slot::from(1);
+		// 1. Set the clock to the to a tick past the tranche where the assignment should be
+		//    triggered.
+		clock.inner.lock().set_tick(slot_to_tick(slot) + 2 * test_tranche as u64);
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => {
+				rx.send(Ok(0)).unwrap();
+			}
+		);
+
+		let block_hash = Hash::repeat_byte(0x01);
+		let fork_block_hash = Hash::repeat_byte(0x02);
+		let candidate_commitments = CandidateCommitments::default();
+		let mut candidate_receipt = dummy_candidate_receipt_v2(block_hash);
+		candidate_receipt.commitments_hash = candidate_commitments.hash();
+		let (chain_builder, session_info) = build_chain_with_two_blocks_with_one_candidate_each(
+			block_hash,
+			fork_block_hash,
+			slot,
+			sync_oracle_handle,
+			candidate_receipt,
+		)
+		.await;
+
+		chain_builder.build(&mut virtual_overseer).await;
+
+		futures_timer::Delay::new(Duration::from_millis(2000)).await;
+
+		// On major syncing ending Approval voting should send all the necessary messages for a
+		// candidate to be approved.
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NewBlocks(
+				_,
+			)) => {
+			}
+		);
+
+		clock
+			.inner
+			.lock()
+			.wakeup_all(slot_to_tick(slot) + 2 * test_tranche as u64 + RESTART_WAKEUP_DELAY - 1);
+
+		// Subsystem should not send any messages because the assignment is not triggered yet.
+		assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none());
+
+		// Set the clock to the tick where the assignment should be triggered.
+		clock
+			.inner
+			.lock()
+			.wakeup_all(slot_to_tick(slot) + 2 * test_tranche as u64 + RESTART_WAKEUP_DELAY);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::RuntimeApi(
+				RuntimeApiMessage::Request(
+					_,
+					RuntimeApiRequest::SessionInfo(_, si_tx),
+				)
+			) => {
+				si_tx.send(Ok(Some(session_info.clone()))).unwrap();
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::RuntimeApi(
+				RuntimeApiMessage::Request(
+					_,
+					RuntimeApiRequest::SessionExecutorParams(_, si_tx),
+				)
+			) => {
+				// Make sure all SessionExecutorParams calls are not made for the leaf (but for its relay parent)
+				si_tx.send(Ok(Some(ExecutorParams::default()))).unwrap();
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::RuntimeApi(
+				RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), )
+			) => {
+				si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment(
+				_,
+				_,
+			)) => {
+			}
+		);
+
+		// Guarantees the approval work has been relaunched.
+		recover_available_data(&mut virtual_overseer).await;
+		fetch_validation_code(&mut virtual_overseer).await;
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive {
+				exec_kind,
+				response_sender,
+				..
+			}) if exec_kind == PvfExecKind::Approval => {
+				response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default())))
+					.unwrap();
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => {
+				let _ = sender.send(Ok(ApprovalVotingParams {
+					max_approval_coalesce_count: 1,
+				}));
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_))
+		);
+
+		clock
+			.inner
+			.lock()
+			.wakeup_all(slot_to_tick(slot) + 2 * test_tranche as u64 + RESTART_WAKEUP_DELAY);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment(
+				_,
+				_,
+			)) => {
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => {
+				let _ = sender.send(Ok(ApprovalVotingParams {
+					max_approval_coalesce_count: 1,
+				}));
+			}
+		);
+
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_))
+		);
+
+		// Assert that there are no more messages being sent by the subsystem
+		assert!(overseer_recv(&mut virtual_overseer).timeout(TIMEOUT / 2).await.is_none());
+
+		virtual_overseer
+	});
+}
+
 // Test we correctly update the timer when we mark the beginning of gathering assignments.
 #[test]
 fn test_gathering_assignments_statements() {
diff --git a/prdoc/pr_6973.prdoc b/prdoc/pr_6973.prdoc
new file mode 100644
index 00000000000..416789b9171
--- /dev/null
+++ b/prdoc/pr_6973.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: approval-voting fix sending of assignments after restart
+
+doc:
+  - audience: Node Dev
+    description: |
+      There is a problem on restart where nodes will not trigger their needed assignment if 
+      they were offline and the time of the assignment passed, so after restart always 
+      schedule a wakeup so that nodes have the opportunity of triggering their assignments
+      if they are still needed.
+
+crates:
+  - name: polkadot-node-core-approval-voting
+    bump: minor
-- 
GitLab


From ba36b2d2293d72d087072254e6371d9089f192b7 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert <skunert49@gmail.com>
Date: Tue, 14 Jan 2025 18:56:30 +0100
Subject: [PATCH 052/116] CI: Only format umbrella crate during umbrella check
 (#7139)

The umbrella crate quick-check was always failing whenever there was
something misformatted in the whole codebase.
This leads to an error that indicates that a new crate was added, even
when it was not.

After this PR we only apply `cargo fmt` to the newly generated umbrella
crate `polkadot-sdk`. This results in this check being independent from
the fmt job which should check the entire codebase.
---
 .github/workflows/checks-quick.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/checks-quick.yml b/.github/workflows/checks-quick.yml
index 4c26b85a630..1a8813833de 100644
--- a/.github/workflows/checks-quick.yml
+++ b/.github/workflows/checks-quick.yml
@@ -138,7 +138,7 @@ jobs:
           # Fixes "detected dubious ownership" error in the ci
           git config --global --add safe.directory '*'
           python3 scripts/generate-umbrella.py --sdk . --version 0.1.0
-          cargo +nightly fmt --all
+          cargo +nightly fmt -p polkadot-sdk
 
           if [ -n "$(git status --porcelain)" ]; then
             cat <<EOF
-- 
GitLab


From 85c244f6e6e59db23bdfcfef903fd9145f0546ad Mon Sep 17 00:00:00 2001
From: Carlo Sala <carlosalag@protonmail.com>
Date: Tue, 14 Jan 2025 20:57:05 +0100
Subject: [PATCH 053/116] xcm: convert properly assets in xcmpayment apis
 (#7134)

Port #6459 changes to relays as well, which were probably forgotten in
that PR.
Thanks!

---------

Co-authored-by: Francisco Aguirre <franciscoaguirreperez@gmail.com>
Co-authored-by: command-bot <>
---
 polkadot/runtime/rococo/src/lib.rs  |  3 ++-
 polkadot/runtime/westend/src/lib.rs |  3 ++-
 prdoc/pr_7134.prdoc                 | 11 +++++++++++
 3 files changed, 15 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_7134.prdoc

diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index e5d703700fe..b3f2a003327 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -1885,7 +1885,8 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
-			match asset.try_as::<AssetId>() {
+			let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into();
+			match latest_asset_id {
 				Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => {
 					// for native token
 					Ok(WeightToFee::weight_to_fee(&weight))
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 9d77a5e5eea..58d2bdcb7c7 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -2445,7 +2445,8 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result<u128, XcmPaymentApiError> {
-			match asset.try_as::<AssetId>() {
+			let latest_asset_id: Result<AssetId, ()> = asset.clone().try_into();
+			match latest_asset_id {
 				Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => {
 					// for native token
 					Ok(WeightToFee::weight_to_fee(&weight))
diff --git a/prdoc/pr_7134.prdoc b/prdoc/pr_7134.prdoc
new file mode 100644
index 00000000000..095d4757f43
--- /dev/null
+++ b/prdoc/pr_7134.prdoc
@@ -0,0 +1,11 @@
+title: 'xcm: convert properly assets in xcmpayment apis'
+doc:
+- audience: Runtime User
+  description: |-
+    Port #6459 changes to relays as well, which were probably forgotten in that PR.
+    Thanks!
+crates:
+- name: rococo-runtime
+  bump: patch
+- name: westend-runtime
+  bump: patch
-- 
GitLab


From 5f391db8af50a79db83acfe37f73c7202177d71c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?= <git@kchr.de>
Date: Tue, 14 Jan 2025 21:22:52 +0100
Subject: [PATCH 054/116] PRDOC: Document `validate: false` (#7117)

---
 docs/contributor/prdoc.md | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md
index 1f6252425e6..b3f7a7e94f0 100644
--- a/docs/contributor/prdoc.md
+++ b/docs/contributor/prdoc.md
@@ -81,9 +81,6 @@ picked if no other applies. The `None` option is equivalent to the `R0-silent` l
 level. Experimental and private APIs are exempt from bumping and can be broken at any time. Please
 read the [Crate Section](../RELEASE.md) of the RELEASE doc about them.
 
-> **Note**: There is currently no CI in place to sanity check this information, but should be added
-> soon.
-
 ### Example
 
 For example when you modified two crates and record the changes:
@@ -106,3 +103,21 @@ you do not need to bump a crate that had a SemVer breaking change only from re-e
 crate with a breaking change.  
 `minor` an `patch` bumps do not need to be inherited, since `cargo` will automatically update them
 to the latest compatible version.
+
+### Overwrite CI check
+
+The `check-semver` CI check is doing sanity checks based on the provided `PRDoc` and the mentioned
+crate version bumps. The tooling is not perfect and it may recommend incorrect bumps of the version.
+The CI check can be forced to accept the provided version bump. This can be done like:
+
+```yaml
+crates:
+  - name: frame-example
+    bump: major
+    validate: false
+  - name: frame-example-pallet
+    bump: minor
+```
+
+By putting `validate: false` for `frame-example`, the version bump is ignored by the tooling. For
+`frame-example-pallet` the version bump is still validated by the CI check.
-- 
GitLab


From d5539aa63edc8068eff9c4cbb78214c3a5ab66b2 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert <skunert49@gmail.com>
Date: Tue, 14 Jan 2025 23:47:19 +0100
Subject: [PATCH 055/116] Parachains: Use relay chain slot for velocity
 measurement (#6825)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

closes #3967

## Changes
We now use relay chain slots to measure velocity on chain. Previously we
were storing the current parachain slot. Then in `on_state_proof` of the
`ConsensusHook` we were checking how many blocks were athored in the
current parachain slot. This works well when the parachain slot time and
relay chain slot time is the same. With elastic scaling, we can have
parachain slot times lower than that of the relay chain. In these cases
we want to measure velocity in relation to the relay chain. This PR
adjusts that.


##  Migration
This PR includes a migration. Storage item `SlotInfo` of pallet
`aura-ext` is renamed to `RelaySlotInfo` to better reflect its new
content. A migration has been added that just kills the old storage
item. `RelaySlotInfo` will be `None` initially but its value will be
adjusted after one new relay chain slot arrives.

---------

Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 Cargo.lock                                    |   5 +
 .../consensus/aura/src/collators/lookahead.rs |   1 +
 .../consensus/aura/src/collators/mod.rs       |  30 +-
 .../slot_based/block_builder_task.rs          |  11 +-
 cumulus/client/parachain-inherent/src/mock.rs |  11 +-
 cumulus/pallets/aura-ext/Cargo.toml           |   8 +-
 .../pallets/aura-ext/src/consensus_hook.rs    |  42 ++-
 cumulus/pallets/aura-ext/src/lib.rs           |  26 +-
 cumulus/pallets/aura-ext/src/migration.rs     |  74 ++++
 cumulus/pallets/aura-ext/src/test.rs          | 338 ++++++++++++++++++
 .../parachain-system/src/consensus_hook.rs    |   4 +-
 cumulus/pallets/parachain-system/src/lib.rs   |   4 +-
 .../assets/asset-hub-rococo/src/lib.rs        |   1 +
 .../assets/asset-hub-westend/src/lib.rs       |   2 +-
 .../bridge-hubs/bridge-hub-rococo/src/lib.rs  |   1 +
 .../bridge-hubs/bridge-hub-westend/src/lib.rs |   1 +
 .../collectives-westend/src/lib.rs            |   1 +
 .../contracts/contracts-rococo/src/lib.rs     |   1 +
 .../coretime/coretime-rococo/src/lib.rs       |   1 +
 .../coretime/coretime-westend/src/lib.rs      |   1 +
 .../runtimes/people/people-rococo/src/lib.rs  |   1 +
 .../runtimes/people/people-westend/src/lib.rs |   1 +
 cumulus/primitives/aura/src/lib.rs            |   6 +-
 cumulus/xcm/xcm-emulator/src/lib.rs           |   1 +
 prdoc/pr_6825.prdoc                           |  50 +++
 25 files changed, 560 insertions(+), 62 deletions(-)
 create mode 100644 cumulus/pallets/aura-ext/src/migration.rs
 create mode 100644 cumulus/pallets/aura-ext/src/test.rs
 create mode 100644 prdoc/pr_6825.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 3eab84d5ed1..7725db743c4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4874,6 +4874,8 @@ name = "cumulus-pallet-aura-ext"
 version = "0.7.0"
 dependencies = [
  "cumulus-pallet-parachain-system 0.7.0",
+ "cumulus-primitives-core 0.7.0",
+ "cumulus-test-relay-sproof-builder 0.7.0",
  "frame-support 28.0.0",
  "frame-system 28.0.0",
  "pallet-aura 27.0.0",
@@ -4882,7 +4884,10 @@ dependencies = [
  "scale-info",
  "sp-application-crypto 30.0.0",
  "sp-consensus-aura 0.32.0",
+ "sp-core 28.0.0",
+ "sp-io 30.0.0",
  "sp-runtime 31.0.1",
+ "sp-version 29.0.0",
 ]
 
 [[package]]
diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs
index 2dbcf5eb58e..7723de5a576 100644
--- a/cumulus/client/consensus/aura/src/collators/lookahead.rs
+++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs
@@ -336,6 +336,7 @@ where
 				);
 				Some(super::can_build_upon::<_, _, P>(
 					slot_now,
+					relay_slot,
 					timestamp,
 					block_hash,
 					included_block,
diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs
index 89070607fba..031fa963ba6 100644
--- a/cumulus/client/consensus/aura/src/collators/mod.rs
+++ b/cumulus/client/consensus/aura/src/collators/mod.rs
@@ -34,7 +34,7 @@ use polkadot_primitives::{
 	ValidationCodeHash,
 };
 use sc_consensus_aura::{standalone as aura_internal, AuraApi};
-use sp_api::ProvideRuntimeApi;
+use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_core::Pair;
 use sp_keystore::KeystorePtr;
 use sp_timestamp::Timestamp;
@@ -160,7 +160,8 @@ async fn cores_scheduled_for_para(
 // Checks if we own the slot at the given block and whether there
 // is space in the unincluded segment.
 async fn can_build_upon<Block: BlockT, Client, P>(
-	slot: Slot,
+	para_slot: Slot,
+	relay_slot: Slot,
 	timestamp: Timestamp,
 	parent_hash: Block::Hash,
 	included_block: Block::Hash,
@@ -169,25 +170,28 @@ async fn can_build_upon<Block: BlockT, Client, P>(
 ) -> Option<SlotClaim<P::Public>>
 where
 	Client: ProvideRuntimeApi<Block>,
-	Client::Api: AuraApi<Block, P::Public> + AuraUnincludedSegmentApi<Block>,
+	Client::Api: AuraApi<Block, P::Public> + AuraUnincludedSegmentApi<Block> + ApiExt<Block>,
 	P: Pair,
 	P::Public: Codec,
 	P::Signature: Codec,
 {
 	let runtime_api = client.runtime_api();
 	let authorities = runtime_api.authorities(parent_hash).ok()?;
-	let author_pub = aura_internal::claim_slot::<P>(slot, &authorities, keystore).await?;
+	let author_pub = aura_internal::claim_slot::<P>(para_slot, &authorities, keystore).await?;
 
-	// Here we lean on the property that building on an empty unincluded segment must always
-	// be legal. Skipping the runtime API query here allows us to seamlessly run this
-	// collator against chains which have not yet upgraded their runtime.
-	if parent_hash != included_block &&
-		!runtime_api.can_build_upon(parent_hash, included_block, slot).ok()?
-	{
-		return None
-	}
+	let Ok(Some(api_version)) =
+		runtime_api.api_version::<dyn AuraUnincludedSegmentApi<Block>>(parent_hash)
+	else {
+		return (parent_hash == included_block)
+			.then(|| SlotClaim::unchecked::<P>(author_pub, para_slot, timestamp));
+	};
+
+	let slot = if api_version > 1 { relay_slot } else { para_slot };
 
-	Some(SlotClaim::unchecked::<P>(author_pub, slot, timestamp))
+	runtime_api
+		.can_build_upon(parent_hash, included_block, slot)
+		.ok()?
+		.then(|| SlotClaim::unchecked::<P>(author_pub, para_slot, timestamp))
 }
 
 /// Use [`cumulus_client_consensus_common::find_potential_parents`] to find parachain blocks that
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
index 41751f1db53..48287555dea 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs
@@ -23,7 +23,7 @@ use cumulus_primitives_aura::AuraUnincludedSegmentApi;
 use cumulus_primitives_core::{GetCoreSelectorApi, PersistedValidationData};
 use cumulus_relay_chain_interface::RelayChainInterface;
 
-use polkadot_primitives::Id as ParaId;
+use polkadot_primitives::{Block as RelayBlock, Id as ParaId};
 
 use futures::prelude::*;
 use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider};
@@ -302,8 +302,17 @@ where
 			// on-chain data.
 			collator.collator_service().check_block_status(parent_hash, &parent_header);
 
+			let Ok(relay_slot) =
+				sc_consensus_babe::find_pre_digest::<RelayBlock>(relay_parent_header)
+					.map(|babe_pre_digest| babe_pre_digest.slot())
+			else {
+				tracing::error!(target: crate::LOG_TARGET, "Relay chain does not contain babe slot. This should never happen.");
+				continue;
+			};
+
 			let slot_claim = match crate::collators::can_build_upon::<_, _, P>(
 				para_slot.slot,
+				relay_slot,
 				para_slot.timestamp,
 				parent_hash,
 				included_block,
diff --git a/cumulus/client/parachain-inherent/src/mock.rs b/cumulus/client/parachain-inherent/src/mock.rs
index e08aca93256..8dbc6ace0f0 100644
--- a/cumulus/client/parachain-inherent/src/mock.rs
+++ b/cumulus/client/parachain-inherent/src/mock.rs
@@ -17,8 +17,9 @@
 use crate::{ParachainInherentData, INHERENT_IDENTIFIER};
 use codec::Decode;
 use cumulus_primitives_core::{
-	relay_chain, relay_chain::UpgradeGoAhead, InboundDownwardMessage, InboundHrmpMessage, ParaId,
-	PersistedValidationData,
+	relay_chain,
+	relay_chain::{Slot, UpgradeGoAhead},
+	InboundDownwardMessage, InboundHrmpMessage, ParaId, PersistedValidationData,
 };
 use cumulus_primitives_parachain_inherent::MessageQueueChain;
 use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
@@ -28,9 +29,6 @@ use sp_inherents::{InherentData, InherentDataProvider};
 use sp_runtime::traits::Block;
 use std::collections::BTreeMap;
 
-/// Relay chain slot duration, in milliseconds.
-pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
-
 /// Inherent data provider that supplies mocked validation data.
 ///
 /// This is useful when running a node that is not actually backed by any relay chain.
@@ -175,8 +173,7 @@ impl<R: Send + Sync + GenerateRandomness<u64>> InherentDataProvider
 		// Calculate the mocked relay block based on the current para block
 		let relay_parent_number =
 			self.relay_offset + self.relay_blocks_per_para_block * self.current_para_block;
-		sproof_builder.current_slot =
-			((relay_parent_number / RELAY_CHAIN_SLOT_DURATION_MILLIS) as u64).into();
+		sproof_builder.current_slot = Slot::from(relay_parent_number as u64);
 
 		sproof_builder.upgrade_go_ahead = self.upgrade_go_ahead;
 		// Process the downward messages and set up the correct head
diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml
index fcda79f1d5c..82638de71aa 100644
--- a/cumulus/pallets/aura-ext/Cargo.toml
+++ b/cumulus/pallets/aura-ext/Cargo.toml
@@ -28,9 +28,15 @@ sp-runtime = { workspace = true }
 cumulus-pallet-parachain-system = { workspace = true }
 
 [dev-dependencies]
-
 # Cumulus
 cumulus-pallet-parachain-system = { workspace = true, default-features = true }
+cumulus-primitives-core = { workspace = true, default-features = true }
+cumulus-test-relay-sproof-builder = { workspace = true, default-features = true }
+
+# Substrate
+sp-core = { workspace = true, default-features = true }
+sp-io = { workspace = true, default-features = true }
+sp-version = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
diff --git a/cumulus/pallets/aura-ext/src/consensus_hook.rs b/cumulus/pallets/aura-ext/src/consensus_hook.rs
index c1a8568bdd8..56966aa0c8f 100644
--- a/cumulus/pallets/aura-ext/src/consensus_hook.rs
+++ b/cumulus/pallets/aura-ext/src/consensus_hook.rs
@@ -18,7 +18,6 @@
 //! block velocity.
 //!
 //! The velocity `V` refers to the rate of block processing by the relay chain.
-
 use super::{pallet, Aura};
 use core::{marker::PhantomData, num::NonZeroU32};
 use cumulus_pallet_parachain_system::{
@@ -54,8 +53,23 @@ where
 		let velocity = V.max(1);
 		let relay_chain_slot = state_proof.read_slot().expect("failed to read relay chain slot");
 
-		let (slot, authored) =
-			pallet::SlotInfo::<T>::get().expect("slot info is inserted on block initialization");
+		let (relay_chain_slot, authored_in_relay) = match pallet::RelaySlotInfo::<T>::get() {
+			Some((slot, authored)) if slot == relay_chain_slot => (slot, authored),
+			Some((slot, _)) if slot < relay_chain_slot => (relay_chain_slot, 0),
+			Some((slot, _)) => {
+				panic!("Slot moved backwards: stored_slot={slot:?}, relay_chain_slot={relay_chain_slot:?}")
+			},
+			None => (relay_chain_slot, 0),
+		};
+
+		// We need to allow one additional block to be built to fill the unincluded segment.
+		if authored_in_relay > velocity {
+			panic!("authored blocks limit is reached for the slot: relay_chain_slot={relay_chain_slot:?}, authored={authored_in_relay:?}, velocity={velocity:?}");
+		}
+
+		pallet::RelaySlotInfo::<T>::put((relay_chain_slot, authored_in_relay + 1));
+
+		let para_slot = pallet_aura::CurrentSlot::<T>::get();
 
 		// Convert relay chain timestamp.
 		let relay_chain_timestamp =
@@ -67,19 +81,16 @@ where
 
 		// Check that we are not too far in the future. Since we expect `V` parachain blocks
 		// during the relay chain slot, we can allow for `V` parachain slots into the future.
-		if *slot > *para_slot_from_relay + u64::from(velocity) {
+		if *para_slot > *para_slot_from_relay + u64::from(velocity) {
 			panic!(
-				"Parachain slot is too far in the future: parachain_slot: {:?}, derived_from_relay_slot: {:?} velocity: {:?}",
-				slot,
+				"Parachain slot is too far in the future: parachain_slot={:?}, derived_from_relay_slot={:?} velocity={:?}, relay_chain_slot={:?}",
+				para_slot,
 				para_slot_from_relay,
-				velocity
+				velocity,
+				relay_chain_slot
 			);
 		}
 
-		// We need to allow authoring multiple blocks in the same slot.
-		if slot != para_slot_from_relay && authored > velocity {
-			panic!("authored blocks limit is reached for the slot")
-		}
 		let weight = T::DbWeight::get().reads(1);
 
 		(
@@ -110,7 +121,7 @@ impl<
 	/// is more recent than the included block itself.
 	pub fn can_build_upon(included_hash: T::Hash, new_slot: Slot) -> bool {
 		let velocity = V.max(1);
-		let (last_slot, authored_so_far) = match pallet::SlotInfo::<T>::get() {
+		let (last_slot, authored_so_far) = match pallet::RelaySlotInfo::<T>::get() {
 			None => return true,
 			Some(x) => x,
 		};
@@ -123,11 +134,8 @@ impl<
 			return false
 		}
 
-		// TODO: This logic needs to be adjusted.
-		// It checks that we have not authored more than `V + 1` blocks in the slot.
-		// As a slot however, we take the parachain slot here. Velocity should
-		// be measured in relation to the relay chain slot.
-		// https://github.com/paritytech/polkadot-sdk/issues/3967
+		// Check that we have not authored more than `V + 1` parachain blocks in the current relay
+		// chain slot.
 		if last_slot == new_slot {
 			authored_so_far < velocity + 1
 		} else {
diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs
index dc854eb8201..19c2634ca70 100644
--- a/cumulus/pallets/aura-ext/src/lib.rs
+++ b/cumulus/pallets/aura-ext/src/lib.rs
@@ -40,6 +40,9 @@ use sp_consensus_aura::{digests::CompatibleDigestItem, Slot};
 use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
 
 pub mod consensus_hook;
+pub mod migration;
+mod test;
+
 pub use consensus_hook::FixedVelocityConsensusHook;
 
 type Aura<T> = pallet_aura::Pallet<T>;
@@ -57,6 +60,7 @@ pub mod pallet {
 	pub trait Config: pallet_aura::Config + frame_system::Config {}
 
 	#[pallet::pallet]
+	#[pallet::storage_version(migration::STORAGE_VERSION)]
 	pub struct Pallet<T>(_);
 
 	#[pallet::hooks]
@@ -70,20 +74,7 @@ pub mod pallet {
 			// Fetch the authorities once to get them into the storage proof of the PoV.
 			Authorities::<T>::get();
 
-			let new_slot = pallet_aura::CurrentSlot::<T>::get();
-
-			let (new_slot, authored) = match SlotInfo::<T>::get() {
-				Some((slot, authored)) if slot == new_slot => (slot, authored + 1),
-				Some((slot, _)) if slot < new_slot => (new_slot, 1),
-				Some(..) => {
-					panic!("slot moved backwards")
-				},
-				None => (new_slot, 1),
-			};
-
-			SlotInfo::<T>::put((new_slot, authored));
-
-			T::DbWeight::get().reads_writes(4, 2)
+			T::DbWeight::get().reads_writes(1, 0)
 		}
 	}
 
@@ -99,11 +90,12 @@ pub mod pallet {
 		ValueQuery,
 	>;
 
-	/// Current slot paired with a number of authored blocks.
+	/// Current relay chain slot paired with a number of authored blocks.
 	///
-	/// Updated on each block initialization.
+	/// This is updated in [`FixedVelocityConsensusHook::on_state_proof`] with the current relay
+	/// chain slot as provided by the relay chain state proof.
 	#[pallet::storage]
-	pub(crate) type SlotInfo<T: Config> = StorageValue<_, (Slot, u32), OptionQuery>;
+	pub(crate) type RelaySlotInfo<T: Config> = StorageValue<_, (Slot, u32), OptionQuery>;
 
 	#[pallet::genesis_config]
 	#[derive(frame_support::DefaultNoBound)]
diff --git a/cumulus/pallets/aura-ext/src/migration.rs b/cumulus/pallets/aura-ext/src/migration.rs
new file mode 100644
index 00000000000..b580c19fc73
--- /dev/null
+++ b/cumulus/pallets/aura-ext/src/migration.rs
@@ -0,0 +1,74 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+extern crate alloc;
+
+use crate::{Config, Pallet};
+#[cfg(feature = "try-runtime")]
+use alloc::vec::Vec;
+use frame_support::{migrations::VersionedMigration, pallet_prelude::StorageVersion};
+
+/// The in-code storage version.
+pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+
+mod v0 {
+	use super::*;
+	use frame_support::{pallet_prelude::OptionQuery, storage_alias};
+	use sp_consensus_aura::Slot;
+
+	/// Current slot paired with a number of authored blocks.
+	///
+	/// Updated on each block initialization.
+	#[storage_alias]
+	pub(super) type SlotInfo<T: Config> = StorageValue<Pallet<T>, (Slot, u32), OptionQuery>;
+}
+mod v1 {
+	use super::*;
+	use frame_support::{pallet_prelude::*, traits::UncheckedOnRuntimeUpgrade};
+
+	pub struct UncheckedMigrationToV1<T: Config>(PhantomData<T>);
+
+	impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrationToV1<T> {
+		fn on_runtime_upgrade() -> Weight {
+			let mut weight: Weight = Weight::zero();
+			weight += migrate::<T>();
+			weight
+		}
+
+		#[cfg(feature = "try-runtime")]
+		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::TryRuntimeError> {
+			Ok(Vec::new())
+		}
+		#[cfg(feature = "try-runtime")]
+		fn post_upgrade(_state: Vec<u8>) -> Result<(), sp_runtime::TryRuntimeError> {
+			ensure!(!v0::SlotInfo::<T>::exists(), "SlotInfo should not exist");
+			Ok(())
+		}
+	}
+
+	pub fn migrate<T: Config>() -> Weight {
+		v0::SlotInfo::<T>::kill();
+		T::DbWeight::get().writes(1)
+	}
+}
+
+/// Migrate `V0` to `V1`.
+pub type MigrateV0ToV1<T> = VersionedMigration<
+	0,
+	1,
+	v1::UncheckedMigrationToV1<T>,
+	Pallet<T>,
+	<T as frame_system::Config>::DbWeight,
+>;
diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs
new file mode 100644
index 00000000000..b0099381e68
--- /dev/null
+++ b/cumulus/pallets/aura-ext/src/test.rs
@@ -0,0 +1,338 @@
+// Copyright Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+#![cfg(test)]
+extern crate alloc;
+
+use super::*;
+
+use core::num::NonZeroU32;
+use cumulus_pallet_parachain_system::{
+	consensus_hook::ExpectParentIncluded, AnyRelayNumber, DefaultCoreSelector, ParachainSetCode,
+};
+use cumulus_primitives_core::ParaId;
+use frame_support::{
+	derive_impl,
+	pallet_prelude::ConstU32,
+	parameter_types,
+	traits::{ConstBool, ConstU64, EnqueueWithOrigin},
+};
+use sp_io::TestExternalities;
+use sp_version::RuntimeVersion;
+
+type Block = frame_system::mocking::MockBlock<Test>;
+
+frame_support::construct_runtime!(
+	pub enum Test {
+		System: frame_system,
+		ParachainSystem: cumulus_pallet_parachain_system,
+		Aura: pallet_aura,
+		AuraExt: crate,
+	}
+);
+
+parameter_types! {
+	pub Version: RuntimeVersion = RuntimeVersion {
+		spec_name: "test".into(),
+		impl_name: "system-test".into(),
+		authoring_version: 1,
+		spec_version: 1,
+		impl_version: 1,
+		apis: sp_version::create_apis_vec!([]),
+		transaction_version: 1,
+		system_version: 1,
+	};
+}
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for Test {
+	type Block = Block;
+	type Version = Version;
+	type OnSetCode = ParachainSetCode<Test>;
+	type RuntimeEvent = ();
+}
+
+impl crate::Config for Test {}
+
+impl pallet_aura::Config for Test {
+	type AuthorityId = sp_consensus_aura::sr25519::AuthorityId;
+	type MaxAuthorities = ConstU32<100_000>;
+	type DisabledValidators = ();
+	type AllowMultipleBlocksPerSlot = ConstBool<true>;
+	type SlotDuration = ConstU64<6000>;
+}
+
+impl pallet_timestamp::Config for Test {
+	type Moment = u64;
+	type OnTimestampSet = ();
+	type MinimumPeriod = ();
+	type WeightInfo = ();
+}
+
+impl cumulus_pallet_parachain_system::Config for Test {
+	type WeightInfo = ();
+	type RuntimeEvent = ();
+	type OnSystemEvent = ();
+	type SelfParaId = ();
+	type OutboundXcmpMessageSource = ();
+	// Ignore all DMP messages by enqueueing them into `()`:
+	type DmpQueue = EnqueueWithOrigin<(), sp_core::ConstU8<0>>;
+	type ReservedDmpWeight = ();
+	type XcmpMessageHandler = ();
+	type ReservedXcmpWeight = ();
+	type CheckAssociatedRelayNumber = AnyRelayNumber;
+	type ConsensusHook = ExpectParentIncluded;
+	type SelectCore = DefaultCoreSelector<Test>;
+}
+
+#[cfg(test)]
+mod test {
+	use crate::test::*;
+	use cumulus_pallet_parachain_system::{
+		Ancestor, ConsensusHook, RelayChainStateProof, UsedBandwidth,
+	};
+	use sp_core::H256;
+
+	fn set_ancestors() {
+		let mut ancestors = Vec::new();
+		for i in 0..3 {
+			let mut ancestor = Ancestor::new_unchecked(UsedBandwidth::default(), None);
+			ancestor.replace_para_head_hash(H256::repeat_byte(i + 1));
+			ancestors.push(ancestor);
+		}
+		cumulus_pallet_parachain_system::UnincludedSegment::<Test>::put(ancestors);
+	}
+
+	pub fn new_test_ext(para_slot: u64) -> sp_io::TestExternalities {
+		let mut ext = TestExternalities::new_empty();
+		ext.execute_with(|| {
+			set_ancestors();
+			// Set initial parachain slot
+			pallet_aura::CurrentSlot::<Test>::put(Slot::from(para_slot));
+		});
+		ext
+	}
+
+	fn set_relay_slot(slot: u64, authored: u32) {
+		RelaySlotInfo::<Test>::put((Slot::from(slot), authored))
+	}
+
+	fn relay_chain_state_proof(relay_slot: u64) -> RelayChainStateProof {
+		let mut builder = cumulus_test_relay_sproof_builder::RelayStateSproofBuilder::default();
+		builder.current_slot = relay_slot.into();
+
+		let (hash, state_proof) = builder.into_state_root_and_proof();
+
+		RelayChainStateProof::new(ParaId::from(200), hash, state_proof)
+			.expect("Should be able to construct state proof.")
+	}
+
+	fn assert_slot_info(expected_slot: u64, expected_authored: u32) {
+		let (slot, authored) = pallet::RelaySlotInfo::<Test>::get().unwrap();
+		assert_eq!(slot, Slot::from(expected_slot), "Slot stored in RelaySlotInfo is incorrect.");
+		assert_eq!(
+			authored, expected_authored,
+			"Number of authored blocks stored in RelaySlotInfo is incorrect."
+		);
+	}
+
+	#[test]
+	fn test_velocity() {
+		type Hook = FixedVelocityConsensusHook<Test, 6000, 2, 1>;
+
+		new_test_ext(1).execute_with(|| {
+			let state_proof = relay_chain_state_proof(10);
+			let (_, capacity) = Hook::on_state_proof(&state_proof);
+			assert_eq!(capacity, NonZeroU32::new(1).unwrap().into());
+			assert_slot_info(10, 1);
+
+			let (_, capacity) = Hook::on_state_proof(&state_proof);
+			assert_eq!(capacity, NonZeroU32::new(1).unwrap().into());
+			assert_slot_info(10, 2);
+		});
+	}
+
+	#[test]
+	#[should_panic(expected = "authored blocks limit is reached for the slot")]
+	fn test_exceeding_velocity_limit() {
+		const VELOCITY: u32 = 2;
+		type Hook = FixedVelocityConsensusHook<Test, 6000, VELOCITY, 1>;
+
+		new_test_ext(1).execute_with(|| {
+			let state_proof = relay_chain_state_proof(10);
+			for authored in 0..=VELOCITY + 1 {
+				Hook::on_state_proof(&state_proof);
+				assert_slot_info(10, authored + 1);
+			}
+		});
+	}
+
+	#[test]
+	fn test_para_slot_calculated_from_slot_duration() {
+		const VELOCITY: u32 = 2;
+		type Hook = FixedVelocityConsensusHook<Test, 3000, VELOCITY, 1>;
+
+		new_test_ext(6).execute_with(|| {
+			let state_proof = relay_chain_state_proof(10);
+			Hook::on_state_proof(&state_proof);
+
+			let para_slot = Slot::from(7);
+			pallet_aura::CurrentSlot::<Test>::put(para_slot);
+			Hook::on_state_proof(&state_proof);
+		});
+	}
+
+	#[test]
+	fn test_velocity_at_least_one() {
+		// Even though this is 0, one block should always be allowed.
+		const VELOCITY: u32 = 0;
+		type Hook = FixedVelocityConsensusHook<Test, 6000, VELOCITY, 1>;
+
+		new_test_ext(6).execute_with(|| {
+			let state_proof = relay_chain_state_proof(10);
+			Hook::on_state_proof(&state_proof);
+		});
+	}
+
+	#[test]
+	#[should_panic(
+		expected = "Parachain slot is too far in the future: parachain_slot=Slot(8), derived_from_relay_slot=Slot(5) velocity=2"
+	)]
+	fn test_para_slot_calculated_from_slot_duration_2() {
+		const VELOCITY: u32 = 2;
+		type Hook = FixedVelocityConsensusHook<Test, 3000, VELOCITY, 1>;
+
+		new_test_ext(8).execute_with(|| {
+			let state_proof = relay_chain_state_proof(10);
+			let (_, _) = Hook::on_state_proof(&state_proof);
+		});
+	}
+
+	#[test]
+	fn test_velocity_resets_on_new_relay_slot() {
+		const VELOCITY: u32 = 2;
+		type Hook = FixedVelocityConsensusHook<Test, 6000, VELOCITY, 1>;
+
+		new_test_ext(1).execute_with(|| {
+			let state_proof = relay_chain_state_proof(10);
+			for authored in 0..=VELOCITY {
+				Hook::on_state_proof(&state_proof);
+				assert_slot_info(10, authored + 1);
+			}
+
+			let state_proof = relay_chain_state_proof(11);
+			for authored in 0..=VELOCITY {
+				Hook::on_state_proof(&state_proof);
+				assert_slot_info(11, authored + 1);
+			}
+		});
+	}
+
+	#[test]
+	#[should_panic(
+		expected = "Slot moved backwards: stored_slot=Slot(10), relay_chain_slot=Slot(9)"
+	)]
+	fn test_backward_relay_slot_not_tolerated() {
+		type Hook = FixedVelocityConsensusHook<Test, 6000, 2, 1>;
+
+		new_test_ext(1).execute_with(|| {
+			let state_proof = relay_chain_state_proof(10);
+			Hook::on_state_proof(&state_proof);
+			assert_slot_info(10, 1);
+
+			let state_proof = relay_chain_state_proof(9);
+			Hook::on_state_proof(&state_proof);
+		});
+	}
+
+	#[test]
+	#[should_panic(
+		expected = "Parachain slot is too far in the future: parachain_slot=Slot(13), derived_from_relay_slot=Slot(10) velocity=2"
+	)]
+	fn test_future_parachain_slot_errors() {
+		type Hook = FixedVelocityConsensusHook<Test, 6000, 2, 1>;
+
+		new_test_ext(13).execute_with(|| {
+			let state_proof = relay_chain_state_proof(10);
+			Hook::on_state_proof(&state_proof);
+		});
+	}
+
+	#[test]
+	fn test_can_build_upon_true_when_empty() {
+		const VELOCITY: u32 = 2;
+		type Hook = FixedVelocityConsensusHook<Test, 6000, VELOCITY, 1>;
+
+		new_test_ext(1).execute_with(|| {
+			let hash = H256::repeat_byte(0x1);
+			assert!(Hook::can_build_upon(hash, Slot::from(1)));
+		});
+	}
+
+	#[test]
+	fn test_can_build_upon_respects_velocity() {
+		const VELOCITY: u32 = 2;
+		type Hook = FixedVelocityConsensusHook<Test, 6000, VELOCITY, 10>;
+
+		new_test_ext(1).execute_with(|| {
+			let hash = H256::repeat_byte(0x1);
+			let relay_slot = Slot::from(10);
+
+			set_relay_slot(10, VELOCITY - 1);
+			assert!(Hook::can_build_upon(hash, relay_slot));
+
+			set_relay_slot(10, VELOCITY);
+			assert!(Hook::can_build_upon(hash, relay_slot));
+
+			set_relay_slot(10, VELOCITY + 1);
+			// Velocity too high
+			assert!(!Hook::can_build_upon(hash, relay_slot));
+		});
+	}
+
+	#[test]
+	fn test_can_build_upon_slot_can_not_decrease() {
+		const VELOCITY: u32 = 2;
+		type Hook = FixedVelocityConsensusHook<Test, 6000, VELOCITY, 10>;
+
+		new_test_ext(1).execute_with(|| {
+			let hash = H256::repeat_byte(0x1);
+
+			set_relay_slot(10, VELOCITY);
+			// Slot moves backwards
+			assert!(!Hook::can_build_upon(hash, Slot::from(9)));
+		});
+	}
+
+	#[test]
+	fn test_can_build_upon_unincluded_segment_size() {
+		const VELOCITY: u32 = 2;
+		type Hook = FixedVelocityConsensusHook<Test, 6000, VELOCITY, 2>;
+
+		new_test_ext(1).execute_with(|| {
+			let relay_slot = Slot::from(10);
+
+			set_relay_slot(10, VELOCITY);
+			// Size after included is two, we can not build
+			let hash = H256::repeat_byte(0x1);
+			assert!(!Hook::can_build_upon(hash, relay_slot));
+
+			// Size after included is one, we can build
+			let hash = H256::repeat_byte(0x2);
+			assert!(Hook::can_build_upon(hash, relay_slot));
+		});
+	}
+}
diff --git a/cumulus/pallets/parachain-system/src/consensus_hook.rs b/cumulus/pallets/parachain-system/src/consensus_hook.rs
index 3062396a4e7..6d65bdc7718 100644
--- a/cumulus/pallets/parachain-system/src/consensus_hook.rs
+++ b/cumulus/pallets/parachain-system/src/consensus_hook.rs
@@ -22,7 +22,7 @@ use core::num::NonZeroU32;
 use frame_support::weights::Weight;
 
 /// The possible capacity of the unincluded segment.
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
 pub struct UnincludedSegmentCapacity(UnincludedSegmentCapacityInner);
 
 impl UnincludedSegmentCapacity {
@@ -41,7 +41,7 @@ impl UnincludedSegmentCapacity {
 	}
 }
 
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
 pub(crate) enum UnincludedSegmentCapacityInner {
 	ExpectParentIncluded,
 	Value(NonZeroU32),
diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index 0fa759357f6..6857b08e66b 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -80,8 +80,7 @@ pub mod relay_state_snapshot;
 pub mod validate_block;
 
 use unincluded_segment::{
-	Ancestor, HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
-	UsedBandwidth,
+	HrmpChannelUpdate, HrmpWatermarkUpdate, OutboundBandwidthLimits, SegmentTracker,
 };
 
 pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
@@ -109,6 +108,7 @@ pub use consensus_hook::{ConsensusHook, ExpectParentIncluded};
 /// ```
 pub use cumulus_pallet_parachain_system_proc_macro::register_validate_block;
 pub use relay_state_snapshot::{MessagingStateSnapshot, RelayChainStateProof};
+pub use unincluded_segment::{Ancestor, UsedBandwidth};
 
 pub use pallet::*;
 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 1db152e39fd..db9a8201ebb 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -1050,6 +1050,7 @@ pub type Migrations = (
 	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 parameter_types! {
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 71cfdc58cce..cfc150ce5d6 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -341,7 +341,6 @@ pub type LocalAndForeignAssets = fungibles::UnionOf<
 	xcm::v5::Location,
 	AccountId,
 >;
-
 /// Union fungibles implementation for [`LocalAndForeignAssets`] and `Balances`.
 pub type NativeAndAssets = fungible::UnionOf<
 	Balances,
@@ -1129,6 +1128,7 @@ pub type Migrations = (
 	>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 /// Asset Hub Westend has some undecodable storage, delete it.
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 35af034310d..67bc06a9321 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -184,6 +184,7 @@ pub type Migrations = (
 	pallet_bridge_relayers::migration::v1::MigrationToV1<Runtime, ()>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 parameter_types! {
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index 2c2e01b4d21..3824a4e9a7c 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -171,6 +171,7 @@ pub type Migrations = (
 	bridge_to_ethereum_config::migrations::MigrationForXcmV5<Runtime>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 parameter_types! {
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index e9adc4d1eae..5eafc2960cc 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -770,6 +770,7 @@ type Migrations = (
 	pallet_core_fellowship::migration::MigrateV0ToV1<Runtime, FellowshipCoreInstance>,
 	// unreleased
 	pallet_core_fellowship::migration::MigrateV0ToV1<Runtime, AmbassadorCoreInstance>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 /// Executive: handles dispatch to the various modules.
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index 3348a635df0..eaaaf0a9a9a 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -118,6 +118,7 @@ pub type Migrations = (
 	cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 type EventRecord = frame_system::EventRecord<
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
index e9171c79afa..622a40e1d8d 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs
@@ -129,6 +129,7 @@ pub type Migrations = (
 	pallet_broker::migration::MigrateV3ToV4<Runtime, BrokerMigrationV4BlockConversion>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 /// Executive: handles dispatch to the various modules.
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
index 975856b3b6f..7312c9c1639 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs
@@ -129,6 +129,7 @@ pub type Migrations = (
 	pallet_broker::migration::MigrateV3ToV4<Runtime, BrokerMigrationV4BlockConversion>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 /// Executive: handles dispatch to the various modules.
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
index ffdd86c500e..cb0282b17a6 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs
@@ -116,6 +116,7 @@ pub type Migrations = (
 	cumulus_pallet_xcmp_queue::migration::v5::MigrateV4ToV5<Runtime>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 /// Executive: handles dispatch to the various modules.
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
index ee6b0db55b9..050256dd4f6 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs
@@ -115,6 +115,7 @@ pub type Migrations = (
 	pallet_collator_selection::migration::v2::MigrationToV2<Runtime>,
 	// permanent
 	pallet_xcm::migration::MigrateToLatestXcmVersion<Runtime>,
+	cumulus_pallet_aura_ext::migration::MigrateV0ToV1<Runtime>,
 );
 
 /// Executive: handles dispatch to the various modules.
diff --git a/cumulus/primitives/aura/src/lib.rs b/cumulus/primitives/aura/src/lib.rs
index aeeee5f8baf..4e7d7dc3e79 100644
--- a/cumulus/primitives/aura/src/lib.rs
+++ b/cumulus/primitives/aura/src/lib.rs
@@ -34,10 +34,14 @@ sp_api::decl_runtime_apis! {
 	/// When the unincluded segment is short, Aura chains will allow authors to create multiple
 	/// blocks per slot in order to build a backlog. When it is saturated, this API will limit
 	/// the amount of blocks that can be created.
+	///
+	/// Changes:
+	/// - Version 2: Updated `can_build_upon` to take a relay chain `Slot` instead of a parachain `Slot`.
+	#[api_version(2)]
 	pub trait AuraUnincludedSegmentApi {
 		/// Whether it is legal to extend the chain, assuming the given block is the most
 		/// recently included one as-of the relay parent that will be built against, and
-		/// the given slot.
+		/// the given relay chain slot.
 		///
 		/// This should be consistent with the logic the runtime uses when validating blocks to
 		/// avoid issues.
diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs
index ff14b747973..d9b1e7fd9d0 100644
--- a/cumulus/xcm/xcm-emulator/src/lib.rs
+++ b/cumulus/xcm/xcm-emulator/src/lib.rs
@@ -1118,6 +1118,7 @@ macro_rules! decl_test_networks {
 				) -> $crate::ParachainInherentData {
 					let mut sproof = $crate::RelayStateSproofBuilder::default();
 					sproof.para_id = para_id.into();
+					sproof.current_slot = $crate::polkadot_primitives::Slot::from(relay_parent_number as u64);
 
 					// egress channel
 					let e_index = sproof.hrmp_egress_channel_index.get_or_insert_with(Vec::new);
diff --git a/prdoc/pr_6825.prdoc b/prdoc/pr_6825.prdoc
new file mode 100644
index 00000000000..d57b2b573a1
--- /dev/null
+++ b/prdoc/pr_6825.prdoc
@@ -0,0 +1,50 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Use relay chain slot for velocity measurement on parachains
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      The AuraExt pallet's `ConsensusHook` performs checks based on a parachain's velocity. It was previously
+      checking how many blocks were produced in a given parachain slot. This only works well when the parachain
+      and relay chain slot lengths are the same. After this PR, we are checking against the relay chain slot.
+
+      **🚨 Action Required:** A migration named `cumulus_pallet_aura_ext::migration::MigrateV0ToV1` is included
+      that cleans up a renamed storage item. Parachains must add it to their runtimes. More information is available in
+      the [reference docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/frame_runtime_upgrades_and_migrations/index.html#single-block-migrations).
+
+crates:
+  - name: cumulus-pallet-parachain-system
+    bump: minor
+  - name: cumulus-pallet-aura-ext
+    bump: major
+  - name: cumulus-primitives-aura
+    bump: major
+  - name: cumulus-client-parachain-inherent
+    bump: minor
+  - name: cumulus-client-consensus-aura
+    bump: minor
+  - name: xcm-emulator
+    bump: minor
+  - name: asset-hub-rococo-runtime
+    bump: minor
+  - name: asset-hub-westend-runtime
+    bump: minor
+  - name: bridge-hub-rococo-runtime
+    bump: minor
+  - name: bridge-hub-westend-runtime
+    bump: minor
+  - name: collectives-westend-runtime
+    bump: minor
+  - name: coretime-rococo-runtime
+    bump: minor
+  - name: coretime-westend-runtime
+    bump: minor
+  - name: people-rococo-runtime
+    bump: minor
+  - name: people-westend-runtime
+    bump: minor
+  - name: contracts-rococo-runtime
+    bump: minor
+        
-- 
GitLab


From 0d660a420fbc11a90cde5aa4e43ce2027b502162 Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Wed, 15 Jan 2025 11:13:23 +0200
Subject: [PATCH 056/116] approval-voting: Make importing of duplicate
 assignment idempotent (#6971)

Normally, approval-voting wouldn't receive duplicate assignments because
approval-distribution makes sure of it. However, in the situation where
we restart, we might receive the same assignment again, and since
approval-voting has already persisted it, we would end up inserting it
twice in `ApprovalEntry.tranches.assignments` because that's an array.

Fix this by making sure duplicate assignments are a noop if the
validator already had an assignment imported at the same tranche.

---------

Signed-off-by: Alexandru Gheorghe <alexandru.gheorghe@parity.io>
Co-authored-by: ordian <write@reusable.software>
---
 .../approval-voting/src/approval_checking.rs  | 78 ++++++++++++-------
 polkadot/node/core/approval-voting/src/lib.rs | 11 ++-
 .../approval-voting/src/persisted_entries.rs  | 14 +++-
 prdoc/pr_6971.prdoc                           | 16 ++++
 4 files changed, 86 insertions(+), 33 deletions(-)
 create mode 100644 prdoc/pr_6971.prdoc

diff --git a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs
index 3b7262a4682..c7f38619ea1 100644
--- a/polkadot/node/core/approval-voting/src/approval_checking.rs
+++ b/polkadot/node/core/approval-voting/src/approval_checking.rs
@@ -712,13 +712,13 @@ mod tests {
 		}
 		.into();
 
-		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
-		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
+		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false);
+		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false);
 
-		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1);
-		approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1);
+		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1, false);
+		approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1, false);
 
-		approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + 2);
+		approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + 2, false);
 
 		let approvals = bitvec![u8, BitOrderLsb0; 1; 5];
 
@@ -757,8 +757,10 @@ mod tests {
 		}
 		.into();
 
-		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
-		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick);
+		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false);
+		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, true);
+		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick, false);
+		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick, true);
 
 		let approvals = bitvec![u8, BitOrderLsb0; 0; 10];
 
@@ -798,10 +800,10 @@ mod tests {
 		}
 		.into();
 
-		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
-		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
+		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false);
+		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false);
 
-		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick);
+		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick, false);
 
 		let mut approvals = bitvec![u8, BitOrderLsb0; 0; 10];
 		approvals.set(0, true);
@@ -844,11 +846,11 @@ mod tests {
 		}
 		.into();
 
-		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
-		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
+		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false);
+		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false);
 
-		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick);
-		approval_entry.import_assignment(1, ValidatorIndex(3), block_tick);
+		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick, false);
+		approval_entry.import_assignment(1, ValidatorIndex(3), block_tick, false);
 
 		let mut approvals = bitvec![u8, BitOrderLsb0; 0; n_validators];
 		approvals.set(0, true);
@@ -913,14 +915,24 @@ mod tests {
 		}
 		.into();
 
-		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
-		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
+		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false);
+		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false);
 
-		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1);
-		approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1);
+		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1, false);
+		approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1, false);
 
-		approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + no_show_duration + 2);
-		approval_entry.import_assignment(2, ValidatorIndex(5), block_tick + no_show_duration + 2);
+		approval_entry.import_assignment(
+			2,
+			ValidatorIndex(4),
+			block_tick + no_show_duration + 2,
+			false,
+		);
+		approval_entry.import_assignment(
+			2,
+			ValidatorIndex(5),
+			block_tick + no_show_duration + 2,
+			false,
+		);
 
 		let mut approvals = bitvec![u8, BitOrderLsb0; 0; n_validators];
 		approvals.set(0, true);
@@ -1007,14 +1019,24 @@ mod tests {
 		}
 		.into();
 
-		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
-		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
+		approval_entry.import_assignment(0, ValidatorIndex(0), block_tick, false);
+		approval_entry.import_assignment(0, ValidatorIndex(1), block_tick, false);
 
-		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1);
-		approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1);
+		approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1, false);
+		approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1, false);
 
-		approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + no_show_duration + 2);
-		approval_entry.import_assignment(2, ValidatorIndex(5), block_tick + no_show_duration + 2);
+		approval_entry.import_assignment(
+			2,
+			ValidatorIndex(4),
+			block_tick + no_show_duration + 2,
+			false,
+		);
+		approval_entry.import_assignment(
+			2,
+			ValidatorIndex(5),
+			block_tick + no_show_duration + 2,
+			false,
+		);
 
 		let mut approvals = bitvec![u8, BitOrderLsb0; 0; n_validators];
 		approvals.set(0, true);
@@ -1066,7 +1088,7 @@ mod tests {
 			},
 		);
 
-		approval_entry.import_assignment(3, ValidatorIndex(6), block_tick);
+		approval_entry.import_assignment(3, ValidatorIndex(6), block_tick, false);
 		approvals.set(6, true);
 
 		let tranche_now = no_show_duration as DelayTranche + 3;
@@ -1176,7 +1198,7 @@ mod tests {
 			// Populate the requested tranches. The assignments aren't inspected in
 			// this test.
 			for &t in &test_tranche {
-				approval_entry.import_assignment(t, ValidatorIndex(0), 0)
+				approval_entry.import_assignment(t, ValidatorIndex(0), 0, false);
 			}
 
 			let filled_tranches = filled_tranche_iterator(approval_entry.tranches());
diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index b4c2a6afee0..2deca5a1aba 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -2808,8 +2808,15 @@ where
 						Vec::new(),
 					)),
 			};
-			is_duplicate &= approval_entry.is_assigned(assignment.validator);
-			approval_entry.import_assignment(tranche, assignment.validator, tick_now);
+
+			let is_duplicate_for_candidate = approval_entry.is_assigned(assignment.validator);
+			is_duplicate &= is_duplicate_for_candidate;
+			approval_entry.import_assignment(
+				tranche,
+				assignment.validator,
+				tick_now,
+				is_duplicate_for_candidate,
+			);
 
 			// We've imported a new assignment, so we need to schedule a wake-up for when that might
 			// no-show.
diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs
index a5d42d9fd6e..14c678913dc 100644
--- a/polkadot/node/core/approval-voting/src/persisted_entries.rs
+++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs
@@ -172,7 +172,7 @@ impl ApprovalEntry {
 		});
 
 		our.map(|a| {
-			self.import_assignment(a.tranche(), a.validator_index(), tick_now);
+			self.import_assignment(a.tranche(), a.validator_index(), tick_now, false);
 
 			(a.cert().clone(), a.validator_index(), a.tranche())
 		})
@@ -197,6 +197,7 @@ impl ApprovalEntry {
 		tranche: DelayTranche,
 		validator_index: ValidatorIndex,
 		tick_now: Tick,
+		is_duplicate: bool,
 	) {
 		// linear search probably faster than binary. not many tranches typically.
 		let idx = match self.tranches.iter().position(|t| t.tranche >= tranche) {
@@ -213,8 +214,15 @@ impl ApprovalEntry {
 				self.tranches.len() - 1
 			},
 		};
-
-		self.tranches[idx].assignments.push((validator_index, tick_now));
+		// At restart we might have duplicate assignments because approval-distribution is not
+		// persistent across restarts, so avoid adding duplicates.
+		// We already know if we have seen an assignment from this validator and since this
+		// function is on the hot path we can avoid iterating through tranches by using
+		// !is_duplicate to determine if it is already present in the vector and does not need
+		// adding.
+		if !is_duplicate {
+			self.tranches[idx].assignments.push((validator_index, tick_now));
+		}
 		self.assigned_validators.set(validator_index.0 as _, true);
 	}
 
diff --git a/prdoc/pr_6971.prdoc b/prdoc/pr_6971.prdoc
new file mode 100644
index 00000000000..4790d773fee
--- /dev/null
+++ b/prdoc/pr_6971.prdoc
@@ -0,0 +1,16 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Make importing of duplicate assignment idempotent
+
+doc:
+  - audience: Node Dev
+    description: |
+      Normally, approval-voting wouldn't receive duplicate assignments because approval-distribution makes
+      sure of it. However, in the situation where we restart, we might receive the same assignment again, and
+      since approval-voting has already persisted it, we would end up inserting it twice in ApprovalEntry.tranches.assignments
+      because that's an array. Fix this by inserting only assignments that are not duplicates.
+
+crates:
+  - name: polkadot-node-core-approval-voting
+    bump: minor
-- 
GitLab


From f798111afc15f464a772cd7ed37910cc6208b713 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert <skunert49@gmail.com>
Date: Wed, 15 Jan 2025 11:08:49 +0100
Subject: [PATCH 057/116] Fix reversed error message in DispatchInfo (#7170)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix error message in `DispatchInfo` where post-dispatch and pre-dispatch
weight was reversed.

---------

Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 prdoc/pr_7170.prdoc                     | 8 ++++++++
 substrate/frame/support/src/dispatch.rs | 6 ++----
 2 files changed, 10 insertions(+), 4 deletions(-)
 create mode 100644 prdoc/pr_7170.prdoc

diff --git a/prdoc/pr_7170.prdoc b/prdoc/pr_7170.prdoc
new file mode 100644
index 00000000000..fae908f7407
--- /dev/null
+++ b/prdoc/pr_7170.prdoc
@@ -0,0 +1,8 @@
+title: Fix reversed error message in DispatchInfo
+doc:
+- audience: Runtime Dev
+  description: "Fix error message in `DispatchInfo` where post-dispatch and pre-dispatch\
+    \ weight was reversed.\r\n"
+crates:
+- name: frame-support
+  bump: patch
diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs
index 99099683003..14bc2667def 100644
--- a/substrate/frame/support/src/dispatch.rs
+++ b/substrate/frame/support/src/dispatch.rs
@@ -315,10 +315,8 @@ impl PostDispatchInfo {
 					"Post dispatch weight is greater than pre dispatch weight. \
 					Pre dispatch weight may underestimating the actual weight. \
 					Greater post dispatch weight components are ignored.
-					Pre dispatch weight: {:?},
-					Post dispatch weight: {:?}",
-					actual_weight,
-					info_total_weight,
+					Pre dispatch weight: {info_total_weight:?},
+					Post dispatch weight: {actual_weight:?}",
 				);
 			}
 			actual_weight.min(info.total_weight())
-- 
GitLab


From b72e76fba819e8029df27d127c57e3d6f532f1b8 Mon Sep 17 00:00:00 2001
From: Xavier Lau <x@acg.box>
Date: Wed, 15 Jan 2025 18:57:37 +0800
Subject: [PATCH 058/116] Add "run to block" tools (#7109)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Introduce `frame_system::Pallet::run_to_block`,
`frame_system::Pallet::run_to_block_with`, and
`frame_system::RunToBlockHooks` to establish a generic `run_to_block`
mechanism for mock tests, minimizing redundant implementations across
various pallets.

Closes #299.

---

Polkadot address: 156HGo9setPcU2qhFMVWLkcmtCEGySLwNqa3DaEiYSWtte4Y

---------

Signed-off-by: Xavier Lau <x@acg.box>
Co-authored-by: Bastian Köcher <git@kchr.de>
Co-authored-by: command-bot <>
Co-authored-by: Guillaume Thiolliere <guillaume.thiolliere@parity.io>
Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>
---
 cumulus/pallets/dmp-queue/src/tests.rs        |  20 +-
 .../runtime/common/src/assigned_slots/mod.rs  |  89 ++---
 polkadot/runtime/common/src/auctions/mock.rs  |  15 +-
 polkadot/runtime/common/src/auctions/tests.rs | 134 +++----
 polkadot/runtime/common/src/crowdloan/mod.rs  |  57 +--
 .../runtime/common/src/integration_tests.rs   |  16 +-
 .../common/src/paras_registrar/mock.rs        |  40 +-
 polkadot/runtime/common/src/slots/mod.rs      |  66 ++--
 prdoc/pr_7109.prdoc                           |  11 +
 .../multi-block-migrations/src/mock.rs        |  21 +-
 substrate/frame/fast-unstake/src/mock.rs      |  29 +-
 substrate/frame/identity/src/tests.rs         |  22 +-
 substrate/frame/lottery/src/mock.rs           |  18 +-
 substrate/frame/lottery/src/tests.rs          |  25 +-
 substrate/frame/migrations/src/mock.rs        |  31 +-
 substrate/frame/nis/src/mock.rs               |  14 +-
 substrate/frame/nis/src/tests.rs              |  90 ++---
 substrate/frame/nomination-pools/src/mock.rs  |  13 +-
 substrate/frame/recovery/src/mock.rs          |  16 +-
 substrate/frame/recovery/src/tests.rs         |  22 +-
 substrate/frame/root-offences/src/mock.rs     |  18 +-
 substrate/frame/scheduler/src/mock.rs         |  10 +-
 substrate/frame/scheduler/src/tests.rs        | 360 +++++++++---------
 substrate/frame/society/src/mock.rs           |  23 +-
 substrate/frame/society/src/tests.rs          |  12 +-
 substrate/frame/src/lib.rs                    |   2 +-
 substrate/frame/staking/src/mock.rs           |  47 +--
 .../frame/state-trie-migration/src/lib.rs     |  15 +-
 substrate/frame/system/src/lib.rs             | 111 ++++++
 .../frame/transaction-storage/src/mock.rs     |  25 +-
 30 files changed, 650 insertions(+), 722 deletions(-)
 create mode 100644 prdoc/pr_7109.prdoc

diff --git a/cumulus/pallets/dmp-queue/src/tests.rs b/cumulus/pallets/dmp-queue/src/tests.rs
index 70d542ea2ed..368a1c0b436 100644
--- a/cumulus/pallets/dmp-queue/src/tests.rs
+++ b/cumulus/pallets/dmp-queue/src/tests.rs
@@ -21,11 +21,7 @@
 use super::{migration::*, mock::*};
 use crate::*;
 
-use frame_support::{
-	pallet_prelude::*,
-	traits::{OnFinalize, OnIdle, OnInitialize},
-	StorageNoopGuard,
-};
+use frame_support::{pallet_prelude::*, traits::OnIdle, StorageNoopGuard};
 
 #[test]
 fn migration_works() {
@@ -183,14 +179,12 @@ fn migration_too_long_ignored() {
 }
 
 fn run_to_block(n: u64) {
-	assert!(n > System::block_number(), "Cannot go back in time");
-
-	while System::block_number() < n {
-		AllPalletsWithSystem::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		AllPalletsWithSystem::on_initialize(System::block_number());
-		AllPalletsWithSystem::on_idle(System::block_number(), Weight::MAX);
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().after_initialize(|bn| {
+			AllPalletsWithSystem::on_idle(bn, Weight::MAX);
+		}),
+	);
 }
 
 fn assert_only_event(e: Event<Runtime>) {
diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs
index 65942c127b1..dea29f53cad 100644
--- a/polkadot/runtime/common/src/assigned_slots/mod.rs
+++ b/polkadot/runtime/common/src/assigned_slots/mod.rs
@@ -788,39 +788,14 @@ mod tests {
 		t.into()
 	}
 
-	fn run_to_block(n: BlockNumber) {
-		while System::block_number() < n {
-			let mut block = System::block_number();
-			// on_finalize hooks
-			AssignedSlots::on_finalize(block);
-			Slots::on_finalize(block);
-			Parachains::on_finalize(block);
-			ParasShared::on_finalize(block);
-			Configuration::on_finalize(block);
-			Balances::on_finalize(block);
-			System::on_finalize(block);
-			// Set next block
-			System::set_block_number(block + 1);
-			block = System::block_number();
-			// on_initialize hooks
-			System::on_initialize(block);
-			Balances::on_initialize(block);
-			Configuration::on_initialize(block);
-			ParasShared::on_initialize(block);
-			Parachains::on_initialize(block);
-			Slots::on_initialize(block);
-			AssignedSlots::on_initialize(block);
-		}
-	}
-
 	#[test]
 	fn basic_setup_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 			assert_eq!(AssignedSlots::current_lease_period_index(), 0);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 
-			run_to_block(3);
+			System::run_to_block::<AllPalletsWithSystem>(3);
 			assert_eq!(AssignedSlots::current_lease_period_index(), 1);
 		});
 	}
@@ -828,7 +803,7 @@ mod tests {
 	#[test]
 	fn assign_perm_slot_fails_for_unknown_para() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_noop!(
 				AssignedSlots::assign_perm_parachain_slot(
@@ -843,7 +818,7 @@ mod tests {
 	#[test]
 	fn assign_perm_slot_fails_for_invalid_origin() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_noop!(
 				AssignedSlots::assign_perm_parachain_slot(
@@ -858,7 +833,7 @@ mod tests {
 	#[test]
 	fn assign_perm_slot_fails_when_not_parathread() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -881,7 +856,7 @@ mod tests {
 	#[test]
 	fn assign_perm_slot_fails_when_existing_lease() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -920,7 +895,7 @@ mod tests {
 	#[test]
 	fn assign_perm_slot_fails_when_max_perm_slots_exceeded() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -967,7 +942,7 @@ mod tests {
 	fn assign_perm_slot_succeeds_for_parathread() {
 		new_test_ext().execute_with(|| {
 			let mut block = 1;
-			run_to_block(block);
+			System::run_to_block::<AllPalletsWithSystem>(block);
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
 				ParaId::from(1_u32),
@@ -1000,7 +975,7 @@ mod tests {
 				assert_eq!(Slots::already_leased(ParaId::from(1_u32), 0, 2), true);
 
 				block += 1;
-				run_to_block(block);
+				System::run_to_block::<AllPalletsWithSystem>(block);
 			}
 
 			// Para lease ended, downgraded back to parathread (on-demand parachain)
@@ -1012,7 +987,7 @@ mod tests {
 	#[test]
 	fn assign_temp_slot_fails_for_unknown_para() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_noop!(
 				AssignedSlots::assign_temp_parachain_slot(
@@ -1028,7 +1003,7 @@ mod tests {
 	#[test]
 	fn assign_temp_slot_fails_for_invalid_origin() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_noop!(
 				AssignedSlots::assign_temp_parachain_slot(
@@ -1044,7 +1019,7 @@ mod tests {
 	#[test]
 	fn assign_temp_slot_fails_when_not_parathread() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -1068,7 +1043,7 @@ mod tests {
 	#[test]
 	fn assign_temp_slot_fails_when_existing_lease() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -1109,7 +1084,7 @@ mod tests {
 	#[test]
 	fn assign_temp_slot_fails_when_max_temp_slots_exceeded() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			// Register 6 paras & a temp slot for each
 			for n in 0..=5 {
@@ -1151,7 +1126,7 @@ mod tests {
 	fn assign_temp_slot_succeeds_for_single_parathread() {
 		new_test_ext().execute_with(|| {
 			let mut block = 1;
-			run_to_block(block);
+			System::run_to_block::<AllPalletsWithSystem>(block);
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
 				ParaId::from(1_u32),
@@ -1195,7 +1170,7 @@ mod tests {
 				assert_eq!(Slots::already_leased(ParaId::from(1_u32), 0, 1), true);
 
 				block += 1;
-				run_to_block(block);
+				System::run_to_block::<AllPalletsWithSystem>(block);
 			}
 
 			// Block 6
@@ -1210,7 +1185,7 @@ mod tests {
 
 			// Block 12
 			// Para should get a turn after TemporarySlotLeasePeriodLength * LeasePeriod blocks
-			run_to_block(12);
+			System::run_to_block::<AllPalletsWithSystem>(12);
 			println!("block #{}", block);
 			println!("lease period #{}", AssignedSlots::current_lease_period_index());
 			println!("lease {:?}", slots::Leases::<Test>::get(ParaId::from(1_u32)));
@@ -1225,7 +1200,7 @@ mod tests {
 	fn assign_temp_slot_succeeds_for_multiple_parathreads() {
 		new_test_ext().execute_with(|| {
 			// Block 1, Period 0
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			// Register 6 paras & a temp slot for each
 			// (3 slots in current lease period, 3 in the next one)
@@ -1251,7 +1226,7 @@ mod tests {
 			// Block 1-5, Period 0-1
 			for n in 1..=5 {
 				if n > 1 {
-					run_to_block(n);
+					System::run_to_block::<AllPalletsWithSystem>(n);
 				}
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(0)), true);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(1_u32)), false);
@@ -1264,7 +1239,7 @@ mod tests {
 
 			// Block 6-11, Period 2-3
 			for n in 6..=11 {
-				run_to_block(n);
+				System::run_to_block::<AllPalletsWithSystem>(n);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(0)), false);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(1_u32)), true);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(2_u32)), false);
@@ -1276,7 +1251,7 @@ mod tests {
 
 			// Block 12-17, Period 4-5
 			for n in 12..=17 {
-				run_to_block(n);
+				System::run_to_block::<AllPalletsWithSystem>(n);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(0)), false);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(1_u32)), false);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(2_u32)), false);
@@ -1288,7 +1263,7 @@ mod tests {
 
 			// Block 18-23, Period 6-7
 			for n in 18..=23 {
-				run_to_block(n);
+				System::run_to_block::<AllPalletsWithSystem>(n);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(0)), true);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(1_u32)), false);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(2_u32)), true);
@@ -1300,7 +1275,7 @@ mod tests {
 
 			// Block 24-29, Period 8-9
 			for n in 24..=29 {
-				run_to_block(n);
+				System::run_to_block::<AllPalletsWithSystem>(n);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(0)), false);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(1_u32)), true);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(2_u32)), false);
@@ -1312,7 +1287,7 @@ mod tests {
 
 			// Block 30-35, Period 10-11
 			for n in 30..=35 {
-				run_to_block(n);
+				System::run_to_block::<AllPalletsWithSystem>(n);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(0)), false);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(1_u32)), false);
 				assert_eq!(TestRegistrar::<Test>::is_parachain(ParaId::from(2_u32)), false);
@@ -1327,7 +1302,7 @@ mod tests {
 	#[test]
 	fn unassign_slot_fails_for_unknown_para() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_noop!(
 				AssignedSlots::unassign_parachain_slot(RuntimeOrigin::root(), ParaId::from(1_u32),),
@@ -1339,7 +1314,7 @@ mod tests {
 	#[test]
 	fn unassign_slot_fails_for_invalid_origin() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_noop!(
 				AssignedSlots::assign_perm_parachain_slot(
@@ -1354,7 +1329,7 @@ mod tests {
 	#[test]
 	fn unassign_perm_slot_succeeds() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -1386,7 +1361,7 @@ mod tests {
 	#[test]
 	fn unassign_temp_slot_succeeds() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -1419,7 +1394,7 @@ mod tests {
 	#[test]
 	fn set_max_permanent_slots_fails_for_no_root_origin() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_noop!(
 				AssignedSlots::set_max_permanent_slots(RuntimeOrigin::signed(1), 5),
@@ -1430,7 +1405,7 @@ mod tests {
 	#[test]
 	fn set_max_permanent_slots_succeeds() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_eq!(MaxPermanentSlots::<Test>::get(), 2);
 			assert_ok!(AssignedSlots::set_max_permanent_slots(RuntimeOrigin::root(), 10),);
@@ -1441,7 +1416,7 @@ mod tests {
 	#[test]
 	fn set_max_temporary_slots_fails_for_no_root_origin() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_noop!(
 				AssignedSlots::set_max_temporary_slots(RuntimeOrigin::signed(1), 5),
@@ -1452,7 +1427,7 @@ mod tests {
 	#[test]
 	fn set_max_temporary_slots_succeeds() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_eq!(MaxTemporarySlots::<Test>::get(), 6);
 			assert_ok!(AssignedSlots::set_max_temporary_slots(RuntimeOrigin::root(), 12),);
diff --git a/polkadot/runtime/common/src/auctions/mock.rs b/polkadot/runtime/common/src/auctions/mock.rs
index 9fe19e579cf..e0365d363ca 100644
--- a/polkadot/runtime/common/src/auctions/mock.rs
+++ b/polkadot/runtime/common/src/auctions/mock.rs
@@ -20,8 +20,7 @@
 use super::*;
 use crate::{auctions, mock::TestRegistrar};
 use frame_support::{
-	assert_ok, derive_impl, ord_parameter_types, parameter_types,
-	traits::{EitherOfDiverse, OnFinalize, OnInitialize},
+	assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::EitherOfDiverse,
 };
 use frame_system::{EnsureRoot, EnsureSignedBy};
 use pallet_balances;
@@ -244,15 +243,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	});
 	ext
 }
-
-pub fn run_to_block(n: BlockNumber) {
-	while System::block_number() < n {
-		Auctions::on_finalize(System::block_number());
-		Balances::on_finalize(System::block_number());
-		System::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Balances::on_initialize(System::block_number());
-		Auctions::on_initialize(System::block_number());
-	}
-}
diff --git a/polkadot/runtime/common/src/auctions/tests.rs b/polkadot/runtime/common/src/auctions/tests.rs
index 07574eeb295..26e2ac47df8 100644
--- a/polkadot/runtime/common/src/auctions/tests.rs
+++ b/polkadot/runtime/common/src/auctions/tests.rs
@@ -36,7 +36,7 @@ fn basic_setup_works() {
 			AuctionStatus::<u32>::NotStarted
 		);
 
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 
 		assert_eq!(AuctionCounter::<Test>::get(), 0);
 		assert_eq!(TestLeaser::deposit_held(0u32.into(), &1), 0);
@@ -50,7 +50,7 @@ fn basic_setup_works() {
 #[test]
 fn can_start_auction() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 
 		assert_noop!(Auctions::new_auction(RuntimeOrigin::signed(1), 5, 1), BadOrigin);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
@@ -66,7 +66,7 @@ fn can_start_auction() {
 #[test]
 fn bidding_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5));
 
@@ -82,7 +82,7 @@ fn bidding_works() {
 #[test]
 fn under_bidding_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5));
@@ -96,7 +96,7 @@ fn under_bidding_works() {
 #[test]
 fn over_bidding_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 5));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 1, 4, 6));
@@ -115,7 +115,7 @@ fn over_bidding_works() {
 #[test]
 fn auction_proceeds_correctly() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 
@@ -125,49 +125,49 @@ fn auction_proceeds_correctly() {
 			AuctionStatus::<u32>::StartingPeriod
 		);
 
-		run_to_block(2);
+		System::run_to_block::<AllPalletsWithSystem>(2);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::StartingPeriod
 		);
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::StartingPeriod
 		);
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::StartingPeriod
 		);
 
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::StartingPeriod
 		);
 
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(0, 0)
 		);
 
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(1, 0)
 		);
 
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(2, 0)
 		);
 
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::NotStarted
@@ -178,12 +178,12 @@ fn auction_proceeds_correctly() {
 #[test]
 fn can_win_auction() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1));
 		assert_eq!(Balances::reserved_balance(1), 1);
 		assert_eq!(Balances::free_balance(1), 9);
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 
 		assert_eq!(
 			leases(),
@@ -201,7 +201,7 @@ fn can_win_auction() {
 #[test]
 fn can_win_auction_with_late_randomness() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1));
 		assert_eq!(Balances::reserved_balance(1), 1);
@@ -210,7 +210,7 @@ fn can_win_auction_with_late_randomness() {
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::StartingPeriod
 		);
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		// Auction has not yet ended.
 		assert_eq!(leases(), vec![]);
 		assert_eq!(
@@ -222,7 +222,7 @@ fn can_win_auction_with_late_randomness() {
 		set_last_random(H256::zero(), 8);
 		// Auction definitely ended now, but we don't know exactly when in the last 3 blocks yet
 		// since no randomness available yet.
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		// Auction has now ended... But auction winner still not yet decided, so no leases yet.
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
@@ -233,7 +233,7 @@ fn can_win_auction_with_late_randomness() {
 		// Random seed now updated to a value known at block 9, when the auction ended. This
 		// means that the winner can now be chosen.
 		set_last_random(H256::zero(), 9);
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		// Auction ended and winner selected
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
@@ -255,10 +255,10 @@ fn can_win_auction_with_late_randomness() {
 #[test]
 fn can_win_incomplete_auction() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 4, 4, 5));
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 
 		assert_eq!(leases(), vec![((0.into(), 4), LeaseData { leaser: 1, amount: 5 }),]);
 		assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5);
@@ -268,13 +268,13 @@ fn can_win_incomplete_auction() {
 #[test]
 fn should_choose_best_combination() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(2), 0.into(), 1, 2, 3, 4));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), 0.into(), 1, 4, 4, 2));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 1, 1, 4, 2));
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 
 		assert_eq!(
 			leases(),
@@ -295,7 +295,7 @@ fn should_choose_best_combination() {
 #[test]
 fn gap_bid_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 
 		// User 1 will make a bid for period 1 and 4 for the same Para 0
@@ -314,7 +314,7 @@ fn gap_bid_works() {
 		assert_eq!(Balances::reserved_balance(3), 3);
 
 		// End the auction.
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 
 		assert_eq!(
 			leases(),
@@ -334,11 +334,11 @@ fn gap_bid_works() {
 #[test]
 fn deposit_credit_should_work() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5));
 		assert_eq!(Balances::reserved_balance(1), 5);
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 
 		assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]);
 		assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5);
@@ -347,7 +347,7 @@ fn deposit_credit_should_work() {
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 2, 2, 2, 6));
 		// Only 1 reserved since we have a deposit credit of 5.
 		assert_eq!(Balances::reserved_balance(1), 1);
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 
 		assert_eq!(
 			leases(),
@@ -363,11 +363,11 @@ fn deposit_credit_should_work() {
 #[test]
 fn deposit_credit_on_alt_para_should_not_count() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 1, 5));
 		assert_eq!(Balances::reserved_balance(1), 5);
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 
 		assert_eq!(leases(), vec![((0.into(), 1), LeaseData { leaser: 1, amount: 5 }),]);
 		assert_eq!(TestLeaser::deposit_held(0.into(), &1), 5);
@@ -376,7 +376,7 @@ fn deposit_credit_on_alt_para_should_not_count() {
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 1.into(), 2, 2, 2, 6));
 		// 6 reserved since we are bidding on a new para; only works because we don't
 		assert_eq!(Balances::reserved_balance(1), 6);
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 
 		assert_eq!(
 			leases(),
@@ -393,12 +393,12 @@ fn deposit_credit_on_alt_para_should_not_count() {
 #[test]
 fn multiple_bids_work_pre_ending() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 
 		for i in 1..6u64 {
-			run_to_block(i as _);
+			System::run_to_block::<AllPalletsWithSystem>(i as _);
 			assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i));
 			for j in 1..6 {
 				assert_eq!(Balances::reserved_balance(j), if j == i { j } else { 0 });
@@ -406,7 +406,7 @@ fn multiple_bids_work_pre_ending() {
 			}
 		}
 
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert_eq!(
 			leases(),
 			vec![
@@ -422,12 +422,12 @@ fn multiple_bids_work_pre_ending() {
 #[test]
 fn multiple_bids_work_post_ending() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 0, 1));
 
 		for i in 1..6u64 {
-			run_to_block(((i - 1) / 2 + 1) as _);
+			System::run_to_block::<AllPalletsWithSystem>(((i - 1) / 2 + 1) as _);
 			assert_ok!(Auctions::bid(RuntimeOrigin::signed(i), 0.into(), 1, 1, 4, i));
 			for j in 1..6 {
 				assert_eq!(Balances::reserved_balance(j), if j <= i { j } else { 0 });
@@ -438,7 +438,7 @@ fn multiple_bids_work_post_ending() {
 			assert_eq!(ReservedAmounts::<Test>::get((i, ParaId::from(0))).unwrap(), i);
 		}
 
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_eq!(
 			leases(),
 			(1..=4)
@@ -501,7 +501,7 @@ fn calculate_winners_works() {
 #[test]
 fn lower_bids_are_correctly_refunded() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 1, 1));
 		let para_1 = ParaId::from(1_u32);
 		let para_2 = ParaId::from(2_u32);
@@ -527,7 +527,7 @@ fn initialize_winners_in_ending_period_works() {
 	new_test_ext().execute_with(|| {
 		let ed: u64 = <Test as pallet_balances::Config>::ExistentialDeposit::get();
 		assert_eq!(ed, 1);
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 1));
 		let para_1 = ParaId::from(1_u32);
 		let para_2 = ParaId::from(2_u32);
@@ -546,20 +546,20 @@ fn initialize_winners_in_ending_period_works() {
 		winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19));
 		assert_eq!(Winning::<Test>::get(0), Some(winning));
 
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::StartingPeriod
 		);
 
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(0, 0)
 		);
 		assert_eq!(Winning::<Test>::get(0), Some(winning));
 
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(1, 0)
@@ -567,7 +567,7 @@ fn initialize_winners_in_ending_period_works() {
 		assert_eq!(Winning::<Test>::get(1), Some(winning));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 3, 4, 29));
 
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(2, 0)
@@ -580,7 +580,7 @@ fn initialize_winners_in_ending_period_works() {
 #[test]
 fn handle_bid_requires_registered_para() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_noop!(
 			Auctions::bid(RuntimeOrigin::signed(1), 1337.into(), 1, 1, 4, 1),
@@ -599,12 +599,12 @@ fn handle_bid_requires_registered_para() {
 #[test]
 fn handle_bid_checks_existing_lease_periods() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 2, 3, 1));
 		assert_eq!(Balances::reserved_balance(1), 1);
 		assert_eq!(Balances::free_balance(1), 9);
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 
 		assert_eq!(
 			leases(),
@@ -644,7 +644,7 @@ fn less_winning_samples_work() {
 		EndingPeriod::set(30);
 		SampleLength::set(10);
 
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11));
 		let para_1 = ParaId::from(1_u32);
 		let para_2 = ParaId::from(2_u32);
@@ -663,13 +663,13 @@ fn less_winning_samples_work() {
 		winning[SlotRange::TwoThree as u8 as usize] = Some((2, para_2, 19));
 		assert_eq!(Winning::<Test>::get(0), Some(winning));
 
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::StartingPeriod
 		);
 
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(0, 0)
@@ -681,19 +681,19 @@ fn less_winning_samples_work() {
 		winning[SlotRange::ThreeThree as u8 as usize] = Some((3, para_3, 29));
 		assert_eq!(Winning::<Test>::get(0), Some(winning));
 
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(1, 0)
 		);
 		assert_eq!(Winning::<Test>::get(1), Some(winning));
-		run_to_block(25);
+		System::run_to_block::<AllPalletsWithSystem>(25);
 		// Overbid mid sample
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(3), para_3, 1, 13, 14, 29));
 		winning[SlotRange::TwoThree as u8 as usize] = Some((3, para_3, 29));
 		assert_eq!(Winning::<Test>::get(1), Some(winning));
 
-		run_to_block(30);
+		System::run_to_block::<AllPalletsWithSystem>(30);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(2, 0)
@@ -701,7 +701,7 @@ fn less_winning_samples_work() {
 		assert_eq!(Winning::<Test>::get(2), Some(winning));
 
 		set_last_random(H256::from([254; 32]), 40);
-		run_to_block(40);
+		System::run_to_block::<AllPalletsWithSystem>(40);
 		// Auction ended and winner selected
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
@@ -729,71 +729,71 @@ fn auction_status_works() {
 			AuctionStatus::<u32>::NotStarted
 		);
 
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 9, 11));
 
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::StartingPeriod
 		);
 
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(0, 0)
 		);
 
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(0, 1)
 		);
 
-		run_to_block(19);
+		System::run_to_block::<AllPalletsWithSystem>(19);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(0, 9)
 		);
 
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(1, 0)
 		);
 
-		run_to_block(25);
+		System::run_to_block::<AllPalletsWithSystem>(25);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(1, 5)
 		);
 
-		run_to_block(30);
+		System::run_to_block::<AllPalletsWithSystem>(30);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(2, 0)
 		);
 
-		run_to_block(39);
+		System::run_to_block::<AllPalletsWithSystem>(39);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::EndingPeriod(2, 9)
 		);
 
-		run_to_block(40);
+		System::run_to_block::<AllPalletsWithSystem>(40);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::VrfDelay(0)
 		);
 
-		run_to_block(44);
+		System::run_to_block::<AllPalletsWithSystem>(44);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::VrfDelay(4)
 		);
 
 		set_last_random(dummy_hash(), 45);
-		run_to_block(45);
+		System::run_to_block::<AllPalletsWithSystem>(45);
 		assert_eq!(
 			Auctions::auction_status(System::block_number()),
 			AuctionStatus::<u32>::NotStarted
@@ -804,7 +804,7 @@ fn auction_status_works() {
 #[test]
 fn can_cancel_auction() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Auctions::new_auction(RuntimeOrigin::signed(6), 5, 1));
 		assert_ok!(Auctions::bid(RuntimeOrigin::signed(1), 0.into(), 1, 1, 4, 1));
 		assert_eq!(Balances::reserved_balance(1), 1);
diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs
index 8cf288197e3..f8b3169407e 100644
--- a/polkadot/runtime/common/src/crowdloan/mod.rs
+++ b/polkadot/runtime/common/src/crowdloan/mod.rs
@@ -858,10 +858,7 @@ mod crypto {
 mod tests {
 	use super::*;
 
-	use frame_support::{
-		assert_noop, assert_ok, derive_impl, parameter_types,
-		traits::{OnFinalize, OnInitialize},
-	};
+	use frame_support::{assert_noop, assert_ok, derive_impl, parameter_types};
 	use polkadot_primitives::Id as ParaId;
 	use sp_core::H256;
 	use std::{cell::RefCell, collections::BTreeMap, sync::Arc};
@@ -1111,18 +1108,6 @@ mod tests {
 		unreachable!()
 	}
 
-	fn run_to_block(n: u64) {
-		while System::block_number() < n {
-			Crowdloan::on_finalize(System::block_number());
-			Balances::on_finalize(System::block_number());
-			System::on_finalize(System::block_number());
-			System::set_block_number(System::block_number() + 1);
-			System::on_initialize(System::block_number());
-			Balances::on_initialize(System::block_number());
-			Crowdloan::on_initialize(System::block_number());
-		}
-	}
-
 	fn last_event() -> RuntimeEvent {
 		System::events().pop().expect("RuntimeEvent expected").event
 	}
@@ -1426,7 +1411,7 @@ mod tests {
 			);
 
 			// Move past end date
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 
 			// Cannot contribute to ended fund
 			assert_noop!(
@@ -1451,7 +1436,7 @@ mod tests {
 			// crowdloan that has starting period 1.
 			let para_3 = new_para();
 			assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para_3, 1000, 1, 4, 40, None));
-			run_to_block(40);
+			System::run_to_block::<AllPalletsWithSystem>(40);
 			let now = System::block_number();
 			assert_eq!(TestAuctioneer::lease_period_index(now).unwrap().0, 2);
 			assert_noop!(
@@ -1483,12 +1468,12 @@ mod tests {
 				None
 			));
 
-			run_to_block(8);
+			System::run_to_block::<AllPalletsWithSystem>(8);
 			// Can def contribute when auction is running.
 			assert!(TestAuctioneer::auction_status(System::block_number()).is_ending().is_some());
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 250, None));
 
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 			// Can't contribute when auction is in the VRF delay period.
 			assert!(TestAuctioneer::auction_status(System::block_number()).is_vrf());
 			assert_noop!(
@@ -1496,7 +1481,7 @@ mod tests {
 				Error::<Test>::VrfDelayInProgress
 			);
 
-			run_to_block(15);
+			System::run_to_block::<AllPalletsWithSystem>(15);
 			// Its fine to contribute when no auction is running.
 			assert!(!TestAuctioneer::auction_status(System::block_number()).is_in_progress());
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 250, None));
@@ -1526,15 +1511,15 @@ mod tests {
 			let bidder = Crowdloan::fund_account_id(index);
 
 			// Fund crowdloan
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None));
-			run_to_block(3);
+			System::run_to_block::<AllPalletsWithSystem>(3);
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(3), para, 150, None));
-			run_to_block(5);
+			System::run_to_block::<AllPalletsWithSystem>(5);
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(4), para, 200, None));
-			run_to_block(8);
+			System::run_to_block::<AllPalletsWithSystem>(8);
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 250, None));
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 
 			assert_eq!(
 				bids(),
@@ -1561,7 +1546,7 @@ mod tests {
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None));
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(3), para, 50, None));
 
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 			let account_id = Crowdloan::fund_account_id(index);
 			// para has no reserved funds, indicating it did not win the auction.
 			assert_eq!(Balances::reserved_balance(&account_id), 0);
@@ -1591,7 +1576,7 @@ mod tests {
 			assert_ok!(Crowdloan::create(RuntimeOrigin::signed(1), para, 1000, 1, 1, 9, None));
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None));
 
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 			let account_id = Crowdloan::fund_account_id(index);
 
 			// user sends the crowdloan funds trying to make an accounting error
@@ -1636,7 +1621,7 @@ mod tests {
 			);
 
 			// Move to the end of the crowdloan
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 			assert_ok!(Crowdloan::refund(RuntimeOrigin::signed(1337), para));
 
 			// Funds are returned
@@ -1671,7 +1656,7 @@ mod tests {
 			assert_eq!(Balances::free_balance(account_id), 21000);
 
 			// Move to the end of the crowdloan
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 			assert_ok!(Crowdloan::refund(RuntimeOrigin::signed(1337), para));
 			assert_eq!(
 				last_event(),
@@ -1705,7 +1690,7 @@ mod tests {
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None));
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(3), para, 50, None));
 
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 			// All funds are refunded
 			assert_ok!(Crowdloan::refund(RuntimeOrigin::signed(2), para));
 
@@ -1730,7 +1715,7 @@ mod tests {
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para, 100, None));
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(3), para, 50, None));
 
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 
 			// We test the historic case where crowdloan accounts only have one provider:
 			{
@@ -1770,7 +1755,7 @@ mod tests {
 				Error::<Test>::NotReadyToDissolve
 			);
 
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 			set_winner(para, 1, true);
 			// Can't dissolve when it won.
 			assert_noop!(
@@ -1815,13 +1800,13 @@ mod tests {
 			// simulate the reserving of para's funds. this actually happens in the Slots pallet.
 			assert_ok!(Balances::reserve(&account_id, 149));
 
-			run_to_block(19);
+			System::run_to_block::<AllPalletsWithSystem>(19);
 			assert_noop!(
 				Crowdloan::withdraw(RuntimeOrigin::signed(2), 2, para),
 				Error::<Test>::BidOrLeaseActive
 			);
 
-			run_to_block(20);
+			System::run_to_block::<AllPalletsWithSystem>(20);
 			// simulate the unreserving of para's funds, now that the lease expired. this actually
 			// happens in the Slots pallet.
 			Balances::unreserve(&account_id, 150);
@@ -1949,7 +1934,7 @@ mod tests {
 				Error::<Test>::NoContributions
 			);
 			assert_ok!(Crowdloan::contribute(RuntimeOrigin::signed(2), para_1, 100, None));
-			run_to_block(6);
+			System::run_to_block::<AllPalletsWithSystem>(6);
 			assert_ok!(Crowdloan::poke(RuntimeOrigin::signed(1), para_1));
 			assert_eq!(crowdloan::NewRaise::<Test>::get(), vec![para_1]);
 			assert_noop!(
diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs
index 8a76a138305..bb4ad8b7506 100644
--- a/polkadot/runtime/common/src/integration_tests.rs
+++ b/polkadot/runtime/common/src/integration_tests.rs
@@ -28,7 +28,7 @@ use alloc::sync::Arc;
 use codec::Encode;
 use frame_support::{
 	assert_noop, assert_ok, derive_impl, parameter_types,
-	traits::{ConstU32, Currency, OnFinalize, OnInitialize},
+	traits::{ConstU32, Currency},
 	weights::Weight,
 	PalletId,
 };
@@ -377,14 +377,12 @@ fn add_blocks(n: u32) {
 }
 
 fn run_to_block(n: u32) {
-	assert!(System::block_number() < n);
-	while System::block_number() < n {
-		let block_number = System::block_number();
-		AllPalletsWithSystem::on_finalize(block_number);
-		System::set_block_number(block_number + 1);
-		maybe_new_session(block_number + 1);
-		AllPalletsWithSystem::on_initialize(block_number + 1);
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().before_initialize(|bn| {
+			maybe_new_session(bn);
+		}),
+	);
 }
 
 fn run_to_session(n: u32) {
diff --git a/polkadot/runtime/common/src/paras_registrar/mock.rs b/polkadot/runtime/common/src/paras_registrar/mock.rs
index 1627fd70365..07b8fbca518 100644
--- a/polkadot/runtime/common/src/paras_registrar/mock.rs
+++ b/polkadot/runtime/common/src/paras_registrar/mock.rs
@@ -20,10 +20,7 @@
 use super::*;
 use crate::paras_registrar;
 use alloc::collections::btree_map::BTreeMap;
-use frame_support::{
-	derive_impl, parameter_types,
-	traits::{OnFinalize, OnInitialize},
-};
+use frame_support::{derive_impl, parameter_types};
 use frame_system::limits;
 use polkadot_primitives::{Balance, BlockNumber, MAX_CODE_SIZE};
 use polkadot_runtime_parachains::{configuration, origin, shared};
@@ -205,26 +202,21 @@ pub const VALIDATORS: &[Sr25519Keyring] = &[
 pub fn run_to_block(n: BlockNumber) {
 	// NOTE that this function only simulates modules of interest. Depending on new pallet may
 	// require adding it here.
-	assert!(System::block_number() < n);
-	while System::block_number() < n {
-		let b = System::block_number();
-
-		if System::block_number() > 1 {
-			System::on_finalize(System::block_number());
-		}
-		// Session change every 3 blocks.
-		if (b + 1) % BLOCKS_PER_SESSION == 0 {
-			let session_index = shared::CurrentSessionIndex::<Test>::get() + 1;
-			let validators_pub_keys = VALIDATORS.iter().map(|v| v.public().into()).collect();
-
-			shared::Pallet::<Test>::set_session_index(session_index);
-			shared::Pallet::<Test>::set_active_validators_ascending(validators_pub_keys);
-
-			Parachains::test_on_new_session();
-		}
-		System::set_block_number(b + 1);
-		System::on_initialize(System::block_number());
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().before_finalize(|bn| {
+			// Session change every 3 blocks.
+			if (bn + 1) % BLOCKS_PER_SESSION == 0 {
+				let session_index = shared::CurrentSessionIndex::<Test>::get() + 1;
+				let validators_pub_keys = VALIDATORS.iter().map(|v| v.public().into()).collect();
+
+				shared::Pallet::<Test>::set_session_index(session_index);
+				shared::Pallet::<Test>::set_active_validators_ascending(validators_pub_keys);
+
+				Parachains::test_on_new_session();
+			}
+		}),
+	);
 }
 
 pub fn run_to_session(n: BlockNumber) {
diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs
index 333f14c6608..59a1f1870b2 100644
--- a/polkadot/runtime/common/src/slots/mod.rs
+++ b/polkadot/runtime/common/src/slots/mod.rs
@@ -584,28 +584,16 @@ mod tests {
 		t.into()
 	}
 
-	fn run_to_block(n: BlockNumber) {
-		while System::block_number() < n {
-			Slots::on_finalize(System::block_number());
-			Balances::on_finalize(System::block_number());
-			System::on_finalize(System::block_number());
-			System::set_block_number(System::block_number() + 1);
-			System::on_initialize(System::block_number());
-			Balances::on_initialize(System::block_number());
-			Slots::on_initialize(System::block_number());
-		}
-	}
-
 	#[test]
 	fn basic_setup_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 			assert_eq!(Slots::lease_period_length(), (10, 0));
 			let now = System::block_number();
 			assert_eq!(Slots::lease_period_index(now).unwrap().0, 0);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 
-			run_to_block(10);
+			System::run_to_block::<AllPalletsWithSystem>(10);
 			let now = System::block_number();
 			assert_eq!(Slots::lease_period_index(now).unwrap().0, 1);
 		});
@@ -614,7 +602,7 @@ mod tests {
 	#[test]
 	fn lease_lifecycle_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -627,11 +615,11 @@ mod tests {
 			assert_eq!(Slots::deposit_held(1.into(), &1), 1);
 			assert_eq!(Balances::reserved_balance(1), 1);
 
-			run_to_block(19);
+			System::run_to_block::<AllPalletsWithSystem>(19);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 1);
 			assert_eq!(Balances::reserved_balance(1), 1);
 
-			run_to_block(20);
+			System::run_to_block::<AllPalletsWithSystem>(20);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 			assert_eq!(Balances::reserved_balance(1), 0);
 
@@ -645,7 +633,7 @@ mod tests {
 	#[test]
 	fn lease_interrupted_lifecycle_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -657,19 +645,19 @@ mod tests {
 			assert_ok!(Slots::lease_out(1.into(), &1, 6, 1, 1));
 			assert_ok!(Slots::lease_out(1.into(), &1, 4, 3, 1));
 
-			run_to_block(19);
+			System::run_to_block::<AllPalletsWithSystem>(19);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 6);
 			assert_eq!(Balances::reserved_balance(1), 6);
 
-			run_to_block(20);
+			System::run_to_block::<AllPalletsWithSystem>(20);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 4);
 			assert_eq!(Balances::reserved_balance(1), 4);
 
-			run_to_block(39);
+			System::run_to_block::<AllPalletsWithSystem>(39);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 4);
 			assert_eq!(Balances::reserved_balance(1), 4);
 
-			run_to_block(40);
+			System::run_to_block::<AllPalletsWithSystem>(40);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 			assert_eq!(Balances::reserved_balance(1), 0);
 
@@ -688,7 +676,7 @@ mod tests {
 	#[test]
 	fn lease_relayed_lifecycle_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -704,25 +692,25 @@ mod tests {
 			assert_eq!(Slots::deposit_held(1.into(), &2), 4);
 			assert_eq!(Balances::reserved_balance(2), 4);
 
-			run_to_block(19);
+			System::run_to_block::<AllPalletsWithSystem>(19);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 6);
 			assert_eq!(Balances::reserved_balance(1), 6);
 			assert_eq!(Slots::deposit_held(1.into(), &2), 4);
 			assert_eq!(Balances::reserved_balance(2), 4);
 
-			run_to_block(20);
+			System::run_to_block::<AllPalletsWithSystem>(20);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 			assert_eq!(Balances::reserved_balance(1), 0);
 			assert_eq!(Slots::deposit_held(1.into(), &2), 4);
 			assert_eq!(Balances::reserved_balance(2), 4);
 
-			run_to_block(29);
+			System::run_to_block::<AllPalletsWithSystem>(29);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 			assert_eq!(Balances::reserved_balance(1), 0);
 			assert_eq!(Slots::deposit_held(1.into(), &2), 4);
 			assert_eq!(Balances::reserved_balance(2), 4);
 
-			run_to_block(30);
+			System::run_to_block::<AllPalletsWithSystem>(30);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 			assert_eq!(Balances::reserved_balance(1), 0);
 			assert_eq!(Slots::deposit_held(1.into(), &2), 0);
@@ -738,7 +726,7 @@ mod tests {
 	#[test]
 	fn lease_deposit_increase_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -755,11 +743,11 @@ mod tests {
 			assert_eq!(Slots::deposit_held(1.into(), &1), 6);
 			assert_eq!(Balances::reserved_balance(1), 6);
 
-			run_to_block(29);
+			System::run_to_block::<AllPalletsWithSystem>(29);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 6);
 			assert_eq!(Balances::reserved_balance(1), 6);
 
-			run_to_block(30);
+			System::run_to_block::<AllPalletsWithSystem>(30);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 			assert_eq!(Balances::reserved_balance(1), 0);
 
@@ -773,7 +761,7 @@ mod tests {
 	#[test]
 	fn lease_deposit_decrease_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -790,19 +778,19 @@ mod tests {
 			assert_eq!(Slots::deposit_held(1.into(), &1), 6);
 			assert_eq!(Balances::reserved_balance(1), 6);
 
-			run_to_block(19);
+			System::run_to_block::<AllPalletsWithSystem>(19);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 6);
 			assert_eq!(Balances::reserved_balance(1), 6);
 
-			run_to_block(20);
+			System::run_to_block::<AllPalletsWithSystem>(20);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 4);
 			assert_eq!(Balances::reserved_balance(1), 4);
 
-			run_to_block(29);
+			System::run_to_block::<AllPalletsWithSystem>(29);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 4);
 			assert_eq!(Balances::reserved_balance(1), 4);
 
-			run_to_block(30);
+			System::run_to_block::<AllPalletsWithSystem>(30);
 			assert_eq!(Slots::deposit_held(1.into(), &1), 0);
 			assert_eq!(Balances::reserved_balance(1), 0);
 
@@ -816,7 +804,7 @@ mod tests {
 	#[test]
 	fn clear_all_leases_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -852,7 +840,7 @@ mod tests {
 	#[test]
 	fn lease_out_current_lease_period() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
@@ -867,7 +855,7 @@ mod tests {
 				dummy_validation_code()
 			));
 
-			run_to_block(20);
+			System::run_to_block::<AllPalletsWithSystem>(20);
 			let now = System::block_number();
 			assert_eq!(Slots::lease_period_index(now).unwrap().0, 2);
 			// Can't lease from the past
@@ -884,7 +872,7 @@ mod tests {
 	#[test]
 	fn trigger_onboard_works() {
 		new_test_ext().execute_with(|| {
-			run_to_block(1);
+			System::run_to_block::<AllPalletsWithSystem>(1);
 			assert_ok!(TestRegistrar::<Test>::register(
 				1,
 				ParaId::from(1_u32),
diff --git a/prdoc/pr_7109.prdoc b/prdoc/pr_7109.prdoc
new file mode 100644
index 00000000000..e54ef329513
--- /dev/null
+++ b/prdoc/pr_7109.prdoc
@@ -0,0 +1,11 @@
+title: Add "run to block" tools
+doc:
+- audience: Runtime Dev
+  description: |-
+    Introduce `frame_system::Pallet::run_to_block`, `frame_system::Pallet::run_to_block_with`, and `frame_system::RunToBlockHooks` to establish a generic `run_to_block` mechanism for mock tests, minimizing redundant implementations across various pallets.
+
+    Closes #299.
+
+crates:
+- name: frame-system
+  bump: minor
diff --git a/substrate/frame/examples/multi-block-migrations/src/mock.rs b/substrate/frame/examples/multi-block-migrations/src/mock.rs
index b2a946e1c50..64940db080c 100644
--- a/substrate/frame/examples/multi-block-migrations/src/mock.rs
+++ b/substrate/frame/examples/multi-block-migrations/src/mock.rs
@@ -25,10 +25,7 @@
 //! using the [`Migrations`] type.
 
 use frame_support::{
-	construct_runtime, derive_impl,
-	migrations::MultiStepMigrator,
-	pallet_prelude::Weight,
-	traits::{OnFinalize, OnInitialize},
+	construct_runtime, derive_impl, migrations::MultiStepMigrator, pallet_prelude::Weight,
 };
 
 type Block = frame_system::mocking::MockBlock<Runtime>;
@@ -81,13 +78,11 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 
 #[allow(dead_code)]
 pub fn run_to_block(n: u64) {
-	assert!(System::block_number() < n);
-	while System::block_number() < n {
-		let b = System::block_number();
-		AllPalletsWithSystem::on_finalize(b);
-		// Done by Executive:
-		<Runtime as frame_system::Config>::MultiBlockMigrator::step();
-		System::set_block_number(b + 1);
-		AllPalletsWithSystem::on_initialize(b + 1);
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().after_initialize(|_| {
+			// Done by Executive:
+			<Runtime as frame_system::Config>::MultiBlockMigrator::step();
+		}),
+	);
 }
diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs
index 757052e230a..f044fc61018 100644
--- a/substrate/frame/fast-unstake/src/mock.rs
+++ b/substrate/frame/fast-unstake/src/mock.rs
@@ -266,22 +266,19 @@ impl ExtBuilder {
 }
 
 pub(crate) fn run_to_block(n: u64, on_idle: bool) {
-	let current_block = System::block_number();
-	assert!(n > current_block);
-	while System::block_number() < n {
-		Balances::on_finalize(System::block_number());
-		Staking::on_finalize(System::block_number());
-		FastUnstake::on_finalize(System::block_number());
-
-		System::set_block_number(System::block_number() + 1);
-
-		Balances::on_initialize(System::block_number());
-		Staking::on_initialize(System::block_number());
-		FastUnstake::on_initialize(System::block_number());
-		if on_idle {
-			FastUnstake::on_idle(System::block_number(), BlockWeights::get().max_block);
-		}
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default()
+			.before_finalize(|_| {
+				// Satisfy the timestamp pallet.
+				Timestamp::set_timestamp(0);
+			})
+			.after_initialize(|bn| {
+				if on_idle {
+					FastUnstake::on_idle(bn, BlockWeights::get().max_block);
+				}
+			}),
+	);
 }
 
 pub(crate) fn next_block(on_idle: bool) {
diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs
index 7bf5b2a7276..01bc312723a 100644
--- a/substrate/frame/identity/src/tests.rs
+++ b/substrate/frame/identity/src/tests.rs
@@ -26,7 +26,7 @@ use crate::{
 use codec::{Decode, Encode};
 use frame_support::{
 	assert_err, assert_noop, assert_ok, derive_impl, parameter_types,
-	traits::{ConstU32, ConstU64, Get, OnFinalize, OnInitialize},
+	traits::{ConstU32, ConstU64, Get},
 	BoundedVec,
 };
 use frame_system::EnsureRoot;
@@ -114,18 +114,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	ext
 }
 
-fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		Identity::on_finalize(System::block_number());
-		Balances::on_finalize(System::block_number());
-		System::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Balances::on_initialize(System::block_number());
-		Identity::on_initialize(System::block_number());
-	}
-}
-
 fn account(id: u8) -> AccountIdOf<Test> {
 	[id; 32].into()
 }
@@ -1714,7 +1702,7 @@ fn unaccepted_usernames_through_grant_should_expire() {
 			Some((who.clone(), expiration, Provider::Allocation))
 		);
 
-		run_to_block(now + expiration - 1);
+		System::run_to_block::<AllPalletsWithSystem>(now + expiration - 1);
 
 		// Cannot be removed
 		assert_noop!(
@@ -1722,7 +1710,7 @@ fn unaccepted_usernames_through_grant_should_expire() {
 			Error::<Test>::NotExpired
 		);
 
-		run_to_block(now + expiration);
+		System::run_to_block::<AllPalletsWithSystem>(now + expiration);
 
 		// Anyone can remove
 		assert_ok!(Identity::remove_expired_approval(
@@ -1782,7 +1770,7 @@ fn unaccepted_usernames_through_deposit_should_expire() {
 			Some((who.clone(), expiration, Provider::AuthorityDeposit(username_deposit)))
 		);
 
-		run_to_block(now + expiration - 1);
+		System::run_to_block::<AllPalletsWithSystem>(now + expiration - 1);
 
 		// Cannot be removed
 		assert_noop!(
@@ -1790,7 +1778,7 @@ fn unaccepted_usernames_through_deposit_should_expire() {
 			Error::<Test>::NotExpired
 		);
 
-		run_to_block(now + expiration);
+		System::run_to_block::<AllPalletsWithSystem>(now + expiration);
 
 		// Anyone can remove
 		assert_eq!(
diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs
index d2c442e2ac6..b771ed0849f 100644
--- a/substrate/frame/lottery/src/mock.rs
+++ b/substrate/frame/lottery/src/mock.rs
@@ -20,10 +20,7 @@
 use super::*;
 use crate as pallet_lottery;
 
-use frame_support::{
-	derive_impl, parameter_types,
-	traits::{ConstU32, OnFinalize, OnInitialize},
-};
+use frame_support::{derive_impl, parameter_types, traits::ConstU32};
 use frame_support_test::TestRandomness;
 use frame_system::EnsureRoot;
 use sp_runtime::{BuildStorage, Perbill};
@@ -83,16 +80,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	.unwrap();
 	t.into()
 }
-
-/// Run until a particular block.
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		if System::block_number() > 1 {
-			Lottery::on_finalize(System::block_number());
-			System::on_finalize(System::block_number());
-		}
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Lottery::on_initialize(System::block_number());
-	}
-}
diff --git a/substrate/frame/lottery/src/tests.rs b/substrate/frame/lottery/src/tests.rs
index ae3a6c858f2..119be5df492 100644
--- a/substrate/frame/lottery/src/tests.rs
+++ b/substrate/frame/lottery/src/tests.rs
@@ -17,12 +17,11 @@
 
 //! Tests for the module.
 
-use super::*;
-use frame_support::{assert_noop, assert_ok, assert_storage_noop};
-use mock::{
-	new_test_ext, run_to_block, Balances, BalancesCall, Lottery, RuntimeCall, RuntimeOrigin,
-	SystemCall, Test,
+use crate::{
+	mock::{Lottery, *},
+	*,
 };
+use frame_support::{assert_noop, assert_ok, assert_storage_noop};
 use sp_runtime::{traits::BadOrigin, TokenError};
 
 #[test]
@@ -74,13 +73,13 @@ fn basic_end_to_end_works() {
 		assert_eq!(TicketsCount::<Test>::get(), 4);
 
 		// Go to end
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(5), call.clone()));
 		// Ticket isn't bought
 		assert_eq!(TicketsCount::<Test>::get(), 4);
 
 		// Go to payout
-		run_to_block(25);
+		System::run_to_block::<AllPalletsWithSystem>(25);
 		// User 1 wins
 		assert_eq!(Balances::free_balance(&1), 70 + 40);
 		// Lottery is reset and restarted
@@ -115,11 +114,11 @@ fn stop_repeat_works() {
 		// Lottery still exists.
 		assert!(crate::Lottery::<Test>::get().is_some());
 		// End and pick a winner.
-		run_to_block(length + delay);
+		System::run_to_block::<AllPalletsWithSystem>(length + delay);
 
 		// Lottery stays dead and does not repeat.
 		assert!(crate::Lottery::<Test>::get().is_none());
-		run_to_block(length + delay + 1);
+		System::run_to_block::<AllPalletsWithSystem>(length + delay + 1);
 		assert!(crate::Lottery::<Test>::get().is_none());
 	});
 }
@@ -281,7 +280,7 @@ fn buy_ticket_works() {
 		assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 20, 5, false));
 
 		// Go to start, buy ticket for transfer
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call));
 		assert_eq!(TicketsCount::<Test>::get(), 1);
 
@@ -300,12 +299,12 @@ fn buy_ticket_works() {
 		assert_eq!(TicketsCount::<Test>::get(), 2);
 
 		// Go to end, can't buy tickets anymore
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone()));
 		assert_eq!(TicketsCount::<Test>::get(), 2);
 
 		// Go to payout, can't buy tickets when there is no lottery open
-		run_to_block(25);
+		System::run_to_block::<AllPalletsWithSystem>(25);
 		assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone()));
 		assert_eq!(TicketsCount::<Test>::get(), 0);
 		assert_eq!(LotteryIndex::<Test>::get(), 1);
@@ -409,7 +408,7 @@ fn no_participants_works() {
 		assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 10, length, delay, false));
 
 		// End the lottery, no one wins.
-		run_to_block(length + delay);
+		System::run_to_block::<AllPalletsWithSystem>(length + delay);
 	});
 }
 
diff --git a/substrate/frame/migrations/src/mock.rs b/substrate/frame/migrations/src/mock.rs
index 48ff175f813..ea86899cad8 100644
--- a/substrate/frame/migrations/src/mock.rs
+++ b/substrate/frame/migrations/src/mock.rs
@@ -21,12 +21,7 @@
 
 use crate::{mock_helpers::*, Event, Historic};
 
-use frame_support::{
-	derive_impl,
-	migrations::*,
-	traits::{OnFinalize, OnInitialize},
-	weights::Weight,
-};
+use frame_support::{derive_impl, migrations::*, weights::Weight};
 use frame_system::EventRecord;
 use sp_core::H256;
 
@@ -113,18 +108,18 @@ pub fn test_closure<R>(f: impl FnOnce() -> R) -> R {
 	ext.execute_with(f)
 }
 
-pub fn run_to_block(n: u32) {
-	while System::block_number() < n as u64 {
-		log::debug!("Block {}", System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Migrations::on_initialize(System::block_number());
-		// Executive calls this:
-		<Migrations as MultiStepMigrator>::step();
-
-		Migrations::on_finalize(System::block_number());
-		System::on_finalize(System::block_number());
-	}
+pub fn run_to_block(n: u64) {
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default()
+			.before_initialize(|bn| {
+				log::debug!("Block {bn}");
+			})
+			.after_initialize(|_| {
+				// Executive calls this:
+				<Migrations as MultiStepMigrator>::step();
+			}),
+	);
 }
 
 /// Returns the historic migrations, sorted by their identifier.
diff --git a/substrate/frame/nis/src/mock.rs b/substrate/frame/nis/src/mock.rs
index 2b008f8ec2a..08e69ef0de0 100644
--- a/substrate/frame/nis/src/mock.rs
+++ b/substrate/frame/nis/src/mock.rs
@@ -21,7 +21,7 @@ use crate::{self as pallet_nis, Perquintill, WithMaximumOf};
 
 use frame_support::{
 	derive_impl, ord_parameter_types, parameter_types,
-	traits::{fungible::Inspect, ConstU32, ConstU64, OnFinalize, OnInitialize, StorageMapShim},
+	traits::{fungible::Inspect, ConstU32, ConstU64, StorageMapShim},
 	weights::Weight,
 	PalletId,
 };
@@ -145,15 +145,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 pub fn new_test_ext_empty() -> sp_io::TestExternalities {
 	frame_system::GenesisConfig::<Test>::default().build_storage().unwrap().into()
 }
-
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		Nis::on_finalize(System::block_number());
-		Balances::on_finalize(System::block_number());
-		System::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Balances::on_initialize(System::block_number());
-		Nis::on_initialize(System::block_number());
-	}
-}
diff --git a/substrate/frame/nis/src/tests.rs b/substrate/frame/nis/src/tests.rs
index a17aaf42182..10c39a0d48e 100644
--- a/substrate/frame/nis/src/tests.rs
+++ b/substrate/frame/nis/src/tests.rs
@@ -55,7 +55,7 @@ fn enlarge(amount: Balance, max_bids: u32) {
 #[test]
 fn basic_setup_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 
 		for q in 0..3 {
 			assert!(Queues::<Test>::get(q).is_empty());
@@ -76,7 +76,7 @@ fn basic_setup_works() {
 #[test]
 fn place_bid_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_noop!(Nis::place_bid(signed(1), 1, 2), Error::<Test>::AmountTooSmall);
 		assert_noop!(Nis::place_bid(signed(1), 101, 2), FundsUnavailable);
 		assert_noop!(Nis::place_bid(signed(1), 10, 4), Error::<Test>::DurationTooBig);
@@ -90,7 +90,7 @@ fn place_bid_works() {
 #[test]
 fn place_bid_queuing_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 20, 2));
 		assert_ok!(Nis::place_bid(signed(1), 10, 2));
 		assert_ok!(Nis::place_bid(signed(1), 5, 2));
@@ -116,7 +116,7 @@ fn place_bid_queuing_works() {
 #[test]
 fn place_bid_fails_when_queue_full() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 10, 2));
 		assert_ok!(Nis::place_bid(signed(2), 10, 2));
 		assert_ok!(Nis::place_bid(signed(3), 10, 2));
@@ -128,7 +128,7 @@ fn place_bid_fails_when_queue_full() {
 #[test]
 fn multiple_place_bids_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 10, 1));
 		assert_ok!(Nis::place_bid(signed(1), 10, 2));
 		assert_ok!(Nis::place_bid(signed(1), 10, 2));
@@ -154,7 +154,7 @@ fn multiple_place_bids_works() {
 #[test]
 fn retract_single_item_queue_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 10, 1));
 		assert_ok!(Nis::place_bid(signed(1), 10, 2));
 		assert_ok!(Nis::retract_bid(signed(1), 10, 1));
@@ -169,7 +169,7 @@ fn retract_single_item_queue_works() {
 #[test]
 fn retract_with_other_and_duplicate_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 10, 1));
 		assert_ok!(Nis::place_bid(signed(1), 10, 2));
 		assert_ok!(Nis::place_bid(signed(1), 10, 2));
@@ -190,7 +190,7 @@ fn retract_with_other_and_duplicate_works() {
 #[test]
 fn retract_non_existent_item_fails() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_noop!(Nis::retract_bid(signed(1), 10, 1), Error::<Test>::UnknownBid);
 		assert_ok!(Nis::place_bid(signed(1), 10, 1));
 		assert_noop!(Nis::retract_bid(signed(1), 20, 1), Error::<Test>::UnknownBid);
@@ -202,7 +202,7 @@ fn retract_non_existent_item_fails() {
 #[test]
 fn basic_enlarge_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		assert_ok!(Nis::place_bid(signed(2), 40, 2));
 		enlarge(40, 2);
@@ -240,7 +240,7 @@ fn basic_enlarge_works() {
 #[test]
 fn enlarge_respects_bids_limit() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		assert_ok!(Nis::place_bid(signed(2), 40, 2));
 		assert_ok!(Nis::place_bid(signed(3), 40, 2));
@@ -285,7 +285,7 @@ fn enlarge_respects_bids_limit() {
 #[test]
 fn enlarge_respects_amount_limit_and_will_split() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 80, 1));
 		enlarge(40, 2);
 
@@ -317,7 +317,7 @@ fn enlarge_respects_amount_limit_and_will_split() {
 #[test]
 fn basic_thaw_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		assert_eq!(Nis::issuance().effective, 400);
 		assert_eq!(Balances::free_balance(1), 60);
@@ -330,9 +330,9 @@ fn basic_thaw_works() {
 		assert_eq!(Balances::reserved_balance(1), 40);
 		assert_eq!(holdings(), 40);
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::<Test>::NotExpired);
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_noop!(Nis::thaw_private(signed(1), 1, None), Error::<Test>::UnknownReceipt);
 		assert_noop!(Nis::thaw_private(signed(2), 0, None), Error::<Test>::NotOwner);
 
@@ -359,12 +359,12 @@ fn basic_thaw_works() {
 #[test]
 fn partial_thaw_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 80, 1));
 		enlarge(80, 1);
 		assert_eq!(holdings(), 80);
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		let prop = Perquintill::from_rational(4_100_000, 21_000_000u64);
 		assert_noop!(Nis::thaw_private(signed(1), 0, Some(prop)), Error::<Test>::MakesDust);
 		let prop = Perquintill::from_rational(1_050_000, 21_000_000u64);
@@ -402,10 +402,10 @@ fn partial_thaw_works() {
 #[test]
 fn thaw_respects_transfers() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		enlarge(40, 1);
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 
 		assert_eq!(Nis::owner(&0), Some(1));
 		assert_eq!(Balances::reserved_balance(&1), 40);
@@ -428,10 +428,10 @@ fn thaw_respects_transfers() {
 #[test]
 fn communify_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		enlarge(40, 1);
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 
 		assert_eq!(Nis::owner(&0), Some(1));
 		assert_eq!(Balances::reserved_balance(&1), 40);
@@ -479,10 +479,10 @@ fn communify_works() {
 #[test]
 fn privatize_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		enlarge(40, 1);
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_noop!(Nis::privatize(signed(2), 0), Error::<Test>::AlreadyPrivate);
 		assert_ok!(Nis::communify(signed(1), 0));
 
@@ -503,11 +503,11 @@ fn privatize_works() {
 #[test]
 fn privatize_and_thaw_with_another_receipt_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		assert_ok!(Nis::place_bid(signed(2), 40, 1));
 		enlarge(80, 2);
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 
 		assert_ok!(Nis::communify(signed(1), 0));
 		assert_ok!(Nis::communify(signed(2), 1));
@@ -535,7 +535,7 @@ fn privatize_and_thaw_with_another_receipt_works() {
 #[test]
 fn communal_thaw_when_issuance_higher_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1));
 		assert_ok!(Nis::place_bid(signed(1), 100, 1));
 		enlarge(100, 1);
@@ -552,7 +552,7 @@ fn communal_thaw_when_issuance_higher_works() {
 		assert_ok!(Balances::mint_into(&3, 50));
 		assert_ok!(Balances::mint_into(&4, 50));
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 
 		// Unfunded initially...
 		assert_noop!(Nis::thaw_communal(signed(1), 0), Error::<Test>::Unfunded);
@@ -581,7 +581,7 @@ fn communal_thaw_when_issuance_higher_works() {
 #[test]
 fn private_thaw_when_issuance_higher_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1));
 		assert_ok!(Nis::place_bid(signed(1), 100, 1));
 		enlarge(100, 1);
@@ -591,7 +591,7 @@ fn private_thaw_when_issuance_higher_works() {
 		assert_ok!(Balances::mint_into(&3, 50));
 		assert_ok!(Balances::mint_into(&4, 50));
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 
 		// Unfunded initially...
 		assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::<Test>::Unfunded);
@@ -609,7 +609,7 @@ fn private_thaw_when_issuance_higher_works() {
 #[test]
 fn thaw_with_ignored_issuance_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		// Give account zero some balance.
 		assert_ok!(Balances::mint_into(&0, 200));
 
@@ -622,7 +622,7 @@ fn thaw_with_ignored_issuance_works() {
 		assert_ok!(Balances::transfer_allow_death(signed(0), 3, 50));
 		assert_ok!(Balances::transfer_allow_death(signed(0), 4, 50));
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// Unfunded initially...
 		assert_noop!(Nis::thaw_private(signed(1), 0, None), Error::<Test>::Unfunded);
 		// ...so we fund...
@@ -640,7 +640,7 @@ fn thaw_with_ignored_issuance_works() {
 #[test]
 fn thaw_when_issuance_lower_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Balances::transfer_allow_death(signed(2), 1, 1));
 		assert_ok!(Nis::place_bid(signed(1), 100, 1));
 		enlarge(100, 1);
@@ -650,7 +650,7 @@ fn thaw_when_issuance_lower_works() {
 		assert_ok!(Balances::burn_from(&3, 25, Expendable, Exact, Force));
 		assert_ok!(Balances::burn_from(&4, 25, Expendable, Exact, Force));
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_ok!(Nis::thaw_private(signed(1), 0, None));
 
 		assert_ok!(Balances::transfer_allow_death(signed(1), 2, 1));
@@ -662,7 +662,7 @@ fn thaw_when_issuance_lower_works() {
 #[test]
 fn multiple_thaws_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Balances::transfer_allow_death(signed(3), 1, 1));
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		assert_ok!(Nis::place_bid(signed(1), 60, 1));
@@ -675,11 +675,11 @@ fn multiple_thaws_works() {
 		assert_ok!(Balances::mint_into(&4, 100));
 		assert_ok!(Nis::fund_deficit(signed(1)));
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_ok!(Nis::thaw_private(signed(1), 0, None));
 		assert_ok!(Nis::thaw_private(signed(1), 1, None));
 		assert_noop!(Nis::thaw_private(signed(2), 2, None), Error::<Test>::Throttled);
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_ok!(Nis::thaw_private(signed(2), 2, None));
 
 		assert_ok!(Balances::transfer_allow_death(signed(1), 3, 1));
@@ -693,7 +693,7 @@ fn multiple_thaws_works() {
 #[test]
 fn multiple_thaws_works_in_alternative_thaw_order() {
 	new_test_ext().execute_with(|| {
-		run_to_block(1);
+		System::run_to_block::<AllPalletsWithSystem>(1);
 		assert_ok!(Balances::transfer_allow_death(signed(3), 1, 1));
 		assert_ok!(Nis::place_bid(signed(1), 40, 1));
 		assert_ok!(Nis::place_bid(signed(1), 60, 1));
@@ -706,12 +706,12 @@ fn multiple_thaws_works_in_alternative_thaw_order() {
 		assert_ok!(Balances::mint_into(&4, 100));
 		assert_ok!(Nis::fund_deficit(signed(1)));
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_ok!(Nis::thaw_private(signed(2), 2, None));
 		assert_noop!(Nis::thaw_private(signed(1), 1, None), Error::<Test>::Throttled);
 		assert_ok!(Nis::thaw_private(signed(1), 0, None));
 
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_ok!(Nis::thaw_private(signed(1), 1, None));
 
 		assert_ok!(Balances::transfer_allow_death(signed(1), 3, 1));
@@ -725,7 +725,7 @@ fn multiple_thaws_works_in_alternative_thaw_order() {
 #[test]
 fn enlargement_to_target_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(2);
+		System::run_to_block::<AllPalletsWithSystem>(2);
 		let w = <() as WeightInfo>::process_queues() +
 			<() as WeightInfo>::process_queue() +
 			(<() as WeightInfo>::process_bid() * 2);
@@ -737,7 +737,7 @@ fn enlargement_to_target_works() {
 		assert_ok!(Nis::place_bid(signed(3), 40, 3));
 		Target::set(Perquintill::from_percent(40));
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert_eq!(Queues::<Test>::get(1), vec![Bid { amount: 40, who: 1 },]);
 		assert_eq!(
 			Queues::<Test>::get(2),
@@ -749,7 +749,7 @@ fn enlargement_to_target_works() {
 		);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(1, 40), (2, 80), (2, 80)]);
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// Two new items should have been issued to 2 & 3 for 40 each & duration of 3.
 		assert_eq!(
 			Receipts::<Test>::get(0).unwrap(),
@@ -778,7 +778,7 @@ fn enlargement_to_target_works() {
 			}
 		);
 
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		// No change
 		assert_eq!(
 			Summary::<Test>::get(),
@@ -791,7 +791,7 @@ fn enlargement_to_target_works() {
 			}
 		);
 
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		// Two new items should have been issued to 1 & 2 for 40 each & duration of 2.
 		assert_eq!(
 			Receipts::<Test>::get(2).unwrap(),
@@ -820,7 +820,7 @@ fn enlargement_to_target_works() {
 			}
 		);
 
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		// No change now.
 		assert_eq!(
 			Summary::<Test>::get(),
@@ -835,7 +835,7 @@ fn enlargement_to_target_works() {
 
 		// Set target a bit higher to use up the remaining bid.
 		Target::set(Perquintill::from_percent(60));
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 
 		// One new item should have been issued to 1 for 40 each & duration of 2.
 		assert_eq!(
diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs
index cc942039760..f544e79ec48 100644
--- a/substrate/frame/nomination-pools/src/mock.rs
+++ b/substrate/frame/nomination-pools/src/mock.rs
@@ -435,18 +435,7 @@ parameter_types! {
 /// Helper to run a specified amount of blocks.
 pub fn run_blocks(n: u64) {
 	let current_block = System::block_number();
-	run_to_block(n + current_block);
-}
-
-/// Helper to run to a specific block.
-pub fn run_to_block(n: u64) {
-	let current_block = System::block_number();
-	assert!(n > current_block);
-	while System::block_number() < n {
-		Pools::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		Pools::on_initialize(System::block_number());
-	}
+	System::run_to_block::<AllPalletsWithSystem>(n + current_block);
 }
 
 /// All events of this pallet.
diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs
index 3930db82d6c..86f13b0da4f 100644
--- a/substrate/frame/recovery/src/mock.rs
+++ b/substrate/frame/recovery/src/mock.rs
@@ -20,10 +20,7 @@
 use super::*;
 
 use crate as recovery;
-use frame_support::{
-	derive_impl, parameter_types,
-	traits::{OnFinalize, OnInitialize},
-};
+use frame_support::{derive_impl, parameter_types};
 use sp_runtime::BuildStorage;
 
 type Block = frame_system::mocking::MockBlock<Test>;
@@ -86,14 +83,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	.unwrap();
 	t.into()
 }
-
-/// Run until a particular block.
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		if System::block_number() > 1 {
-			System::on_finalize(System::block_number());
-		}
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-	}
-}
diff --git a/substrate/frame/recovery/src/tests.rs b/substrate/frame/recovery/src/tests.rs
index 93df0701585..97085df2ae7 100644
--- a/substrate/frame/recovery/src/tests.rs
+++ b/substrate/frame/recovery/src/tests.rs
@@ -17,12 +17,8 @@
 
 //! Tests for the module.
 
-use super::*;
+use crate::{mock::*, *};
 use frame_support::{assert_noop, assert_ok, traits::Currency};
-use mock::{
-	new_test_ext, run_to_block, Balances, BalancesCall, MaxFriends, Recovery, RecoveryCall,
-	RuntimeCall, RuntimeOrigin, Test,
-};
 use sp_runtime::{bounded_vec, traits::BadOrigin};
 
 #[test]
@@ -70,7 +66,7 @@ fn recovery_life_cycle_works() {
 			delay_period
 		));
 		// Some time has passed, and the user lost their keys!
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		// Using account 1, the user begins the recovery process to recover the lost account
 		assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5));
 		// Off chain, the user contacts their friends and asks them to vouch for the recovery
@@ -84,7 +80,7 @@ fn recovery_life_cycle_works() {
 			Error::<Test>::DelayPeriod
 		);
 		// We need to wait at least the delay_period number of blocks before we can recover
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(1), 5));
 		// Account 1 can use account 5 to close the active recovery process, claiming the deposited
 		// funds used to initiate the recovery process into account 5.
@@ -128,7 +124,7 @@ fn malicious_recovery_fails() {
 			delay_period
 		));
 		// Some time has passed, and account 1 wants to try and attack this account!
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		// Using account 1, the malicious user begins the recovery process on account 5
 		assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5));
 		// Off chain, the user **tricks** their friends and asks them to vouch for the recovery
@@ -144,7 +140,7 @@ fn malicious_recovery_fails() {
 			Error::<Test>::DelayPeriod
 		);
 		// Account 1 needs to wait...
-		run_to_block(19);
+		System::run_to_block::<AllPalletsWithSystem>(19);
 		// One more block to wait!
 		assert_noop!(
 			Recovery::claim_recovery(RuntimeOrigin::signed(1), 5),
@@ -158,7 +154,7 @@ fn malicious_recovery_fails() {
 		// Thanks for the free money!
 		assert_eq!(Balances::total_balance(&5), 110);
 		// The recovery process has been closed, so account 1 can't make the claim
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_noop!(
 			Recovery::claim_recovery(RuntimeOrigin::signed(1), 5),
 			Error::<Test>::NotStarted
@@ -397,7 +393,7 @@ fn claim_recovery_handles_basic_errors() {
 			Recovery::claim_recovery(RuntimeOrigin::signed(1), 5),
 			Error::<Test>::DelayPeriod
 		);
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 		// Cannot claim an account which has not passed the threshold number of votes
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1));
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1));
@@ -427,7 +423,7 @@ fn claim_recovery_works() {
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1));
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1));
 
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 
 		// Account can be recovered.
 		assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(1), 5));
@@ -439,7 +435,7 @@ fn claim_recovery_works() {
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 4));
 		assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 4));
 
-		run_to_block(21);
+		System::run_to_block::<AllPalletsWithSystem>(21);
 
 		// Account is re-recovered.
 		assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(4), 5));
diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs
index a27fb36f64a..7a96b8eade4 100644
--- a/substrate/frame/root-offences/src/mock.rs
+++ b/substrate/frame/root-offences/src/mock.rs
@@ -25,7 +25,7 @@ use frame_election_provider_support::{
 };
 use frame_support::{
 	derive_impl, parameter_types,
-	traits::{ConstU32, ConstU64, Hooks, OneSessionHandler},
+	traits::{ConstU32, ConstU64, OneSessionHandler},
 };
 use pallet_staking::StakerStatus;
 use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage};
@@ -283,16 +283,12 @@ pub(crate) fn start_session(session_index: SessionIndex) {
 /// a block import/propose process where we first initialize the block, then execute some stuff (not
 /// in the function), and then finalize the block.
 pub(crate) fn run_to_block(n: BlockNumber) {
-	Staking::on_finalize(System::block_number());
-	for b in (System::block_number() + 1)..=n {
-		System::set_block_number(b);
-		Session::on_initialize(b);
-		<Staking as Hooks<u64>>::on_initialize(b);
-		Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP);
-		if b != n {
-			Staking::on_finalize(System::block_number());
-		}
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().after_initialize(|bn| {
+			Timestamp::set_timestamp(bn * BLOCK_TIME + INIT_TIMESTAMP);
+		}),
+	);
 }
 
 pub(crate) fn active_era() -> EraIndex {
diff --git a/substrate/frame/scheduler/src/mock.rs b/substrate/frame/scheduler/src/mock.rs
index 8d36ca1c42e..43a964bcf14 100644
--- a/substrate/frame/scheduler/src/mock.rs
+++ b/substrate/frame/scheduler/src/mock.rs
@@ -22,7 +22,7 @@ use super::*;
 use crate as scheduler;
 use frame_support::{
 	derive_impl, ord_parameter_types, parameter_types,
-	traits::{ConstU32, Contains, EitherOfDiverse, EqualPrivilegeOnly, OnFinalize, OnInitialize},
+	traits::{ConstU32, Contains, EitherOfDiverse, EqualPrivilegeOnly},
 };
 use frame_system::{EnsureRoot, EnsureSignedBy};
 use sp_runtime::{BuildStorage, Perbill};
@@ -236,14 +236,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	t.into()
 }
 
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		Scheduler::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		Scheduler::on_initialize(System::block_number());
-	}
-}
-
 pub fn root() -> OriginCaller {
 	system::RawOrigin::Root.into()
 }
diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs
index 3023a370a4b..75522393410 100644
--- a/substrate/frame/scheduler/src/tests.rs
+++ b/substrate/frame/scheduler/src/tests.rs
@@ -20,7 +20,7 @@
 use super::*;
 use crate::mock::{
 	logger::{self, Threshold},
-	new_test_ext, root, run_to_block, LoggerCall, RuntimeCall, Scheduler, Test, *,
+	new_test_ext, root, LoggerCall, RuntimeCall, Scheduler, Test, *,
 };
 use frame_support::{
 	assert_err, assert_noop, assert_ok,
@@ -52,14 +52,14 @@ fn basic_scheduling_works() {
 		));
 
 		// `log` runtime call should not have executed yet
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// `log` runtime call should have executed at block 4
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -87,17 +87,17 @@ fn scheduling_with_preimages_works() {
 		assert!(Preimage::is_requested(&hash));
 
 		// `log` runtime call should not have executed yet
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// preimage should not have been removed when executed by the scheduler
 		assert!(!Preimage::len(&hash).is_some());
 		assert!(!Preimage::is_requested(&hash));
 		// `log` runtime call should have executed at block 4
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -105,7 +105,7 @@ fn scheduling_with_preimages_works() {
 #[test]
 fn schedule_after_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(2);
+		System::run_to_block::<AllPalletsWithSystem>(2);
 		let call =
 			RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) });
 		assert!(!<Test as frame_system::Config>::BaseCallFilter::contains(&call));
@@ -117,11 +117,11 @@ fn schedule_after_works() {
 			root(),
 			Preimage::bound(call).unwrap()
 		));
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert!(logger::log().is_empty());
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -129,7 +129,7 @@ fn schedule_after_works() {
 #[test]
 fn schedule_after_zero_works() {
 	new_test_ext().execute_with(|| {
-		run_to_block(2);
+		System::run_to_block::<AllPalletsWithSystem>(2);
 		let call =
 			RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) });
 		assert!(!<Test as frame_system::Config>::BaseCallFilter::contains(&call));
@@ -141,9 +141,9 @@ fn schedule_after_zero_works() {
 			Preimage::bound(call).unwrap()
 		));
 		// Will trigger on the next block.
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -163,19 +163,19 @@ fn periodic_scheduling_works() {
 			}))
 			.unwrap()
 		));
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]);
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]);
 	});
 }
@@ -201,37 +201,37 @@ fn retry_scheduling_works() {
 		// retry 10 times every 3 blocks
 		assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 3));
 		assert_eq!(Retries::<Test>::iter().count(), 1);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert!(Agenda::<Test>::get(4)[0].is_some());
 		// task should be retried in block 7
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(Agenda::<Test>::get(4).is_empty());
 		assert!(Agenda::<Test>::get(7)[0].is_some());
 		assert!(logger::log().is_empty());
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert!(Agenda::<Test>::get(7)[0].is_some());
 		assert!(logger::log().is_empty());
 		// task still fails, should be retried in block 10
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert!(Agenda::<Test>::get(7).is_empty());
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		assert!(logger::log().is_empty());
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		assert!(logger::log().is_empty());
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert!(logger::log().is_empty());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		// finally it should succeed
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -262,37 +262,37 @@ fn named_retry_scheduling_works() {
 		// retry 10 times every 3 blocks
 		assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 3));
 		assert_eq!(Retries::<Test>::iter().count(), 1);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert!(Agenda::<Test>::get(4)[0].is_some());
 		// task should be retried in block 7
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(Agenda::<Test>::get(4).is_empty());
 		assert!(Agenda::<Test>::get(7)[0].is_some());
 		assert!(logger::log().is_empty());
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert!(Agenda::<Test>::get(7)[0].is_some());
 		assert!(logger::log().is_empty());
 		// task still fails, should be retried in block 10
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert!(Agenda::<Test>::get(7).is_empty());
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		assert!(logger::log().is_empty());
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		assert!(logger::log().is_empty());
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert!(logger::log().is_empty());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		// finally it should succeed
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -333,11 +333,11 @@ fn retry_scheduling_multiple_tasks_works() {
 		// task 42 will be retried 10 times every 3 blocks
 		assert_ok!(Scheduler::set_retry(root().into(), (4, 1), 10, 3));
 		assert_eq!(Retries::<Test>::iter().count(), 2);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert_eq!(Agenda::<Test>::get(4).len(), 2);
 		// both tasks fail
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(Agenda::<Test>::get(4).is_empty());
 		// 20 is rescheduled for next block
 		assert_eq!(Agenda::<Test>::get(5).len(), 1);
@@ -345,41 +345,41 @@ fn retry_scheduling_multiple_tasks_works() {
 		assert_eq!(Agenda::<Test>::get(7).len(), 1);
 		assert!(logger::log().is_empty());
 		// 20 still fails
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		// 20 rescheduled for next block
 		assert_eq!(Agenda::<Test>::get(6).len(), 1);
 		assert_eq!(Agenda::<Test>::get(7).len(), 1);
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert!(logger::log().is_empty());
 		// 20 still fails
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		// rescheduled for next block together with 42
 		assert_eq!(Agenda::<Test>::get(7).len(), 2);
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert!(logger::log().is_empty());
 		// both tasks will fail, for 20 it was the last retry so it's dropped
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert!(Agenda::<Test>::get(7).is_empty());
 		assert!(Agenda::<Test>::get(8).is_empty());
 		// 42 is rescheduled for block 10
 		assert_eq!(Agenda::<Test>::get(10).len(), 1);
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert!(logger::log().is_empty());
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		assert_eq!(Agenda::<Test>::get(10).len(), 1);
 		assert!(logger::log().is_empty());
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert!(logger::log().is_empty());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		// 42 runs successfully
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -422,11 +422,11 @@ fn retry_scheduling_multiple_named_tasks_works() {
 		// task 42 will be retried 10 times every 3 block
 		assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 10, 3));
 		assert_eq!(Retries::<Test>::iter().count(), 2);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert_eq!(Agenda::<Test>::get(4).len(), 2);
 		// both tasks fail
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(Agenda::<Test>::get(4).is_empty());
 		// 42 is rescheduled for block 7
 		assert_eq!(Agenda::<Test>::get(7).len(), 1);
@@ -434,41 +434,41 @@ fn retry_scheduling_multiple_named_tasks_works() {
 		assert_eq!(Agenda::<Test>::get(5).len(), 1);
 		assert!(logger::log().is_empty());
 		// 20 still fails
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		// 20 rescheduled for next block
 		assert_eq!(Agenda::<Test>::get(6).len(), 1);
 		assert_eq!(Agenda::<Test>::get(7).len(), 1);
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert!(logger::log().is_empty());
 		// 20 still fails
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		// 20 rescheduled for next block together with 42
 		assert_eq!(Agenda::<Test>::get(7).len(), 2);
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert!(logger::log().is_empty());
 		// both tasks will fail, for 20 it was the last retry so it's dropped
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert!(Agenda::<Test>::get(7).is_empty());
 		assert!(Agenda::<Test>::get(8).is_empty());
 		// 42 is rescheduled for block 10
 		assert_eq!(Agenda::<Test>::get(10).len(), 1);
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert!(logger::log().is_empty());
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		assert_eq!(Agenda::<Test>::get(10).len(), 1);
 		assert!(logger::log().is_empty());
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert!(logger::log().is_empty());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		// 42 runs successfully
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
-		run_to_block(11);
+		System::run_to_block::<AllPalletsWithSystem>(11);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -495,33 +495,33 @@ fn retry_scheduling_with_period_works() {
 		// 42 will be retried 10 times every 2 blocks
 		assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 2));
 		assert_eq!(Retries::<Test>::iter().count(), 1);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert!(Agenda::<Test>::get(4)[0].is_some());
 		// 42 runs successfully once, it will run again at block 7
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(Agenda::<Test>::get(4).is_empty());
 		assert!(Agenda::<Test>::get(7)[0].is_some());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// nothing changed
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert!(Agenda::<Test>::get(7)[0].is_some());
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// 42 runs successfully again, it will run again at block 10
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert!(Agenda::<Test>::get(7).is_empty());
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
 		// 42 has 10 retries left out of a total of 10
 		assert_eq!(Retries::<Test>::get((10, 0)).unwrap().remaining, 10);
 		// 42 will fail because we're outside the set threshold (block number in `4..8`), so it
 		// should be retried in 2 blocks (at block 12)
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		// should be queued for the normal period of 3 blocks
 		assert!(Agenda::<Test>::get(13)[0].is_some());
 		// should also be queued to be retried in 2 blocks
@@ -532,7 +532,7 @@ fn retry_scheduling_with_period_works() {
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
 		// 42 will fail again
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		// should still be queued for the normal period
 		assert!(Agenda::<Test>::get(13)[0].is_some());
 		// should be queued to be retried in 2 blocks
@@ -543,7 +543,7 @@ fn retry_scheduling_with_period_works() {
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
 		// 42 will fail for the regular periodic run
-		run_to_block(13);
+		System::run_to_block::<AllPalletsWithSystem>(13);
 		// should still be queued for the normal period
 		assert!(Agenda::<Test>::get(16)[0].is_some());
 		// should still be queued to be retried next block
@@ -560,7 +560,7 @@ fn retry_scheduling_with_period_works() {
 		// change the threshold to allow the task to succeed
 		Threshold::<Test>::put((14, 100));
 		// first retry should now succeed
-		run_to_block(14);
+		System::run_to_block::<AllPalletsWithSystem>(14);
 		assert!(Agenda::<Test>::get(15)[0].as_ref().unwrap().maybe_periodic.is_none());
 		assert_eq!(Agenda::<Test>::get(16).iter().filter(|entry| entry.is_some()).count(), 1);
 		assert!(Agenda::<Test>::get(16)[0].is_some());
@@ -569,7 +569,7 @@ fn retry_scheduling_with_period_works() {
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]);
 		// second retry should also succeed
-		run_to_block(15);
+		System::run_to_block::<AllPalletsWithSystem>(15);
 		assert_eq!(Agenda::<Test>::get(16).iter().filter(|entry| entry.is_some()).count(), 1);
 		assert!(Agenda::<Test>::get(16)[0].is_some());
 		assert!(Agenda::<Test>::get(17).is_empty());
@@ -580,7 +580,7 @@ fn retry_scheduling_with_period_works() {
 			vec![(root(), 42u32), (root(), 42u32), (root(), 42u32), (root(), 42u32)]
 		);
 		// normal periodic run on block 16 will succeed
-		run_to_block(16);
+		System::run_to_block::<AllPalletsWithSystem>(16);
 		// next periodic run at block 19
 		assert!(Agenda::<Test>::get(19)[0].is_some());
 		assert!(Agenda::<Test>::get(18).is_empty());
@@ -598,7 +598,7 @@ fn retry_scheduling_with_period_works() {
 			]
 		);
 		// final periodic run on block 19 will succeed
-		run_to_block(19);
+		System::run_to_block::<AllPalletsWithSystem>(19);
 		// next periodic run at block 19
 		assert_eq!(Agenda::<Test>::iter().count(), 0);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
@@ -639,33 +639,33 @@ fn named_retry_scheduling_with_period_works() {
 		// 42 will be retried 10 times every 2 blocks
 		assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 10, 2));
 		assert_eq!(Retries::<Test>::iter().count(), 1);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert!(Agenda::<Test>::get(4)[0].is_some());
 		// 42 runs successfully once, it will run again at block 7
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(Agenda::<Test>::get(4).is_empty());
 		assert!(Agenda::<Test>::get(7)[0].is_some());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// nothing changed
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert!(Agenda::<Test>::get(7)[0].is_some());
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// 42 runs successfully again, it will run again at block 10
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert!(Agenda::<Test>::get(7).is_empty());
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
 		// 42 has 10 retries left out of a total of 10
 		assert_eq!(Retries::<Test>::get((10, 0)).unwrap().remaining, 10);
 		// 42 will fail because we're outside the set threshold (block number in `4..8`), so it
 		// should be retried in 2 blocks (at block 12)
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		// should be queued for the normal period of 3 blocks
 		assert!(Agenda::<Test>::get(13)[0].is_some());
 		// should also be queued to be retried in 2 blocks
@@ -677,7 +677,7 @@ fn named_retry_scheduling_with_period_works() {
 		assert_eq!(Lookup::<Test>::get([42u8; 32]).unwrap(), (13, 0));
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
 		// 42 will fail again
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		// should still be queued for the normal period
 		assert!(Agenda::<Test>::get(13)[0].is_some());
 		// should be queued to be retried in 2 blocks
@@ -688,7 +688,7 @@ fn named_retry_scheduling_with_period_works() {
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
 		// 42 will fail for the regular periodic run
-		run_to_block(13);
+		System::run_to_block::<AllPalletsWithSystem>(13);
 		// should still be queued for the normal period
 		assert!(Agenda::<Test>::get(16)[0].is_some());
 		// should still be queued to be retried next block
@@ -706,7 +706,7 @@ fn named_retry_scheduling_with_period_works() {
 		// change the threshold to allow the task to succeed
 		Threshold::<Test>::put((14, 100));
 		// first retry should now succeed
-		run_to_block(14);
+		System::run_to_block::<AllPalletsWithSystem>(14);
 		assert!(Agenda::<Test>::get(15)[0].as_ref().unwrap().maybe_periodic.is_none());
 		assert_eq!(Agenda::<Test>::get(16).iter().filter(|entry| entry.is_some()).count(), 1);
 		assert!(Agenda::<Test>::get(16)[0].is_some());
@@ -715,7 +715,7 @@ fn named_retry_scheduling_with_period_works() {
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]);
 		// second retry should also succeed
-		run_to_block(15);
+		System::run_to_block::<AllPalletsWithSystem>(15);
 		assert_eq!(Agenda::<Test>::get(16).iter().filter(|entry| entry.is_some()).count(), 1);
 		assert!(Agenda::<Test>::get(16)[0].is_some());
 		assert!(Agenda::<Test>::get(17).is_empty());
@@ -727,7 +727,7 @@ fn named_retry_scheduling_with_period_works() {
 			vec![(root(), 42u32), (root(), 42u32), (root(), 42u32), (root(), 42u32)]
 		);
 		// normal periodic run on block 16 will succeed
-		run_to_block(16);
+		System::run_to_block::<AllPalletsWithSystem>(16);
 		// next periodic run at block 19
 		assert!(Agenda::<Test>::get(19)[0].is_some());
 		assert!(Agenda::<Test>::get(18).is_empty());
@@ -746,7 +746,7 @@ fn named_retry_scheduling_with_period_works() {
 			]
 		);
 		// final periodic run on block 19 will succeed
-		run_to_block(19);
+		System::run_to_block::<AllPalletsWithSystem>(19);
 		// next periodic run at block 19
 		assert_eq!(Agenda::<Test>::iter().count(), 0);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
@@ -786,12 +786,12 @@ fn retry_scheduling_expires() {
 		// task 42 will be retried 3 times every block
 		assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 3, 1));
 		assert_eq!(Retries::<Test>::iter().count(), 1);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		// task 42 is scheduled for next block
 		assert!(Agenda::<Test>::get(4)[0].is_some());
 		// task fails because we're past block 3
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// task is scheduled for next block
 		assert!(Agenda::<Test>::get(4).is_empty());
 		assert!(Agenda::<Test>::get(5)[0].is_some());
@@ -799,7 +799,7 @@ fn retry_scheduling_expires() {
 		assert_eq!(Retries::<Test>::get((5, 0)).unwrap().remaining, 2);
 		assert!(logger::log().is_empty());
 		// task fails again
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		// task is scheduled for next block
 		assert!(Agenda::<Test>::get(5).is_empty());
 		assert!(Agenda::<Test>::get(6)[0].is_some());
@@ -807,7 +807,7 @@ fn retry_scheduling_expires() {
 		assert_eq!(Retries::<Test>::get((6, 0)).unwrap().remaining, 1);
 		assert!(logger::log().is_empty());
 		// task fails again
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		// task is scheduled for next block
 		assert!(Agenda::<Test>::get(6).is_empty());
 		assert!(Agenda::<Test>::get(7)[0].is_some());
@@ -815,7 +815,7 @@ fn retry_scheduling_expires() {
 		assert_eq!(Retries::<Test>::get((7, 0)).unwrap().remaining, 0);
 		assert!(logger::log().is_empty());
 		// task fails again
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		// task ran out of retries so it gets dropped
 		assert_eq!(Agenda::<Test>::iter().count(), 0);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
@@ -949,17 +949,17 @@ fn retry_periodic_full_cycle() {
 		// 42 will be retried 2 times every block
 		assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 2, 1));
 		assert_eq!(Retries::<Test>::iter().count(), 1);
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert!(logger::log().is_empty());
 		assert!(Agenda::<Test>::get(10)[0].is_some());
 		// 42 runs successfully once, it will run again at block 110
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert!(Agenda::<Test>::get(10).is_empty());
 		assert!(Agenda::<Test>::get(110)[0].is_some());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// nothing changed
-		run_to_block(109);
+		System::run_to_block::<AllPalletsWithSystem>(109);
 		assert!(Agenda::<Test>::get(110)[0].is_some());
 		// original task still has 2 remaining retries
 		assert_eq!(Retries::<Test>::get((110, 0)).unwrap().remaining, 2);
@@ -968,7 +968,7 @@ fn retry_periodic_full_cycle() {
 		Threshold::<Test>::put((1, 2));
 		// 42 will fail because we're outside the set threshold (block number in `1..2`), so it
 		// should be retried next block (at block 111)
-		run_to_block(110);
+		System::run_to_block::<AllPalletsWithSystem>(110);
 		// should be queued for the normal period of 100 blocks
 		assert!(Agenda::<Test>::get(210)[0].is_some());
 		// should also be queued to be retried next block
@@ -980,7 +980,7 @@ fn retry_periodic_full_cycle() {
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// 42 retry will fail again
-		run_to_block(111);
+		System::run_to_block::<AllPalletsWithSystem>(111);
 		// should still be queued for the normal period
 		assert!(Agenda::<Test>::get(210)[0].is_some());
 		// should be queued to be retried next block
@@ -991,20 +991,20 @@ fn retry_periodic_full_cycle() {
 		assert_eq!(Retries::<Test>::iter().count(), 2);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// 42 retry will fail again
-		run_to_block(112);
+		System::run_to_block::<AllPalletsWithSystem>(112);
 		// should still be queued for the normal period
 		assert!(Agenda::<Test>::get(210)[0].is_some());
 		// 42 retry clone ran out of retries, must have been evicted
 		assert_eq!(Agenda::<Test>::iter().count(), 1);
 
 		// advance
-		run_to_block(209);
+		System::run_to_block::<AllPalletsWithSystem>(209);
 		// should still be queued for the normal period
 		assert!(Agenda::<Test>::get(210)[0].is_some());
 		// 42 retry clone ran out of retries, must have been evicted
 		assert_eq!(Agenda::<Test>::iter().count(), 1);
 		// 42 should fail again and should spawn another retry clone
-		run_to_block(210);
+		System::run_to_block::<AllPalletsWithSystem>(210);
 		// should be queued for the normal period of 100 blocks
 		assert!(Agenda::<Test>::get(310)[0].is_some());
 		// should also be queued to be retried next block
@@ -1018,7 +1018,7 @@ fn retry_periodic_full_cycle() {
 		// make 42 run successfully again
 		Threshold::<Test>::put((1, 1000));
 		// 42 retry clone should now succeed
-		run_to_block(211);
+		System::run_to_block::<AllPalletsWithSystem>(211);
 		// should be queued for the normal period of 100 blocks
 		assert!(Agenda::<Test>::get(310)[0].is_some());
 		// retry was successful, retry task should have been discarded
@@ -1029,7 +1029,7 @@ fn retry_periodic_full_cycle() {
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
 
 		// fast forward to the last periodic run of 42
-		run_to_block(310);
+		System::run_to_block::<AllPalletsWithSystem>(310);
 		// 42 was successful, the period ended as this was the 4th scheduled periodic run so 42 must
 		// have been discarded
 		assert_eq!(Agenda::<Test>::iter().count(), 0);
@@ -1057,7 +1057,7 @@ fn reschedule_works() {
 			(4, 0)
 		);
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 
 		assert_eq!(Scheduler::do_reschedule((4, 0), DispatchTime::At(6)).unwrap(), (6, 0));
@@ -1067,13 +1067,13 @@ fn reschedule_works() {
 			Error::<Test>::RescheduleNoChange
 		);
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(logger::log().is_empty());
 
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -1097,7 +1097,7 @@ fn reschedule_named_works() {
 			(4, 0)
 		);
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 
 		assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0));
@@ -1107,13 +1107,13 @@ fn reschedule_named_works() {
 			Error::<Test>::RescheduleNoChange
 		);
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(logger::log().is_empty());
 
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -1137,16 +1137,16 @@ fn reschedule_named_periodic_works() {
 			(4, 0)
 		);
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 
 		assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(5)).unwrap(), (5, 0));
 		assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0));
 
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert!(logger::log().is_empty());
 
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 
 		assert_eq!(
@@ -1154,16 +1154,16 @@ fn reschedule_named_periodic_works() {
 			(10, 0)
 		);
 
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]);
 
-		run_to_block(13);
+		System::run_to_block::<AllPalletsWithSystem>(13);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]);
 
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]);
 	});
 }
@@ -1197,11 +1197,11 @@ fn cancel_named_scheduling_works_with_normal_cancel() {
 			.unwrap(),
 		)
 		.unwrap();
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32]));
 		assert_ok!(Scheduler::do_cancel(None, i));
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert!(logger::log().is_empty());
 	});
 }
@@ -1251,13 +1251,13 @@ fn cancel_named_periodic_scheduling_works() {
 			.unwrap(),
 		)
 		.unwrap();
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32]));
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]);
 	});
 }
@@ -1283,9 +1283,9 @@ fn scheduler_respects_weight_limits() {
 			Preimage::bound(call).unwrap(),
 		));
 		// 69 and 42 do not fit together
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]);
 	});
 }
@@ -1316,26 +1316,26 @@ fn retry_respects_weight_limits() {
 		// set a retry config for 20 for 10 retries every block
 		assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 1));
 		// 20 should fail and be retried later
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(Agenda::<Test>::get(5)[0].is_some());
 		assert!(Agenda::<Test>::get(8)[0].is_some());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert!(logger::log().is_empty());
 		// 20 still fails but is scheduled next block together with 42
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert_eq!(Agenda::<Test>::get(8).len(), 2);
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert!(logger::log().is_empty());
 		// 20 and 42 do not fit together
 		// 42 is executed as it was first in the queue
 		// 20 is still on the 8th block's agenda
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		assert!(Agenda::<Test>::get(8)[0].is_none());
 		assert!(Agenda::<Test>::get(8)[1].is_some());
 		assert_eq!(Retries::<Test>::iter().count(), 1);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// 20 is executed and the schedule is cleared
-		run_to_block(9);
+		System::run_to_block::<AllPalletsWithSystem>(9);
 		assert_eq!(Agenda::<Test>::iter().count(), 0);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
 		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 20u32)]);
@@ -1386,7 +1386,7 @@ fn try_schedule_retry_respects_weight_limits() {
 		// set a retry config for 20 for 10 retries every block
 		assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 1));
 		// 20 should fail and, because of insufficient weight, it should not be scheduled again
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// nothing else should be scheduled
 		assert_eq!(Agenda::<Test>::iter().count(), 0);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
@@ -1415,7 +1415,7 @@ fn scheduler_does_not_delete_permanently_overweight_call() {
 			Preimage::bound(call).unwrap(),
 		));
 		// Never executes.
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![]);
 
 		// Assert the `PermanentlyOverweight` event.
@@ -1445,7 +1445,7 @@ fn scheduler_handles_periodic_failure() {
 			bound.clone(),
 		));
 		// Executes 5 times till block 20.
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		assert_eq!(logger::log().len(), 5);
 
 		// Block 28 will already be full.
@@ -1460,7 +1460,7 @@ fn scheduler_handles_periodic_failure() {
 		}
 
 		// Going to block 24 will emit a `PeriodicFailed` event.
-		run_to_block(24);
+		System::run_to_block::<AllPalletsWithSystem>(24);
 		assert_eq!(logger::log().len(), 6);
 
 		assert_eq!(
@@ -1498,7 +1498,7 @@ fn scheduler_handles_periodic_unavailable_preimage() {
 		assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), call.encode()));
 
 		// Executes 1 times till block 4.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log().len(), 1);
 
 		// As the public api doesn't support to remove a noted preimage, we need to first unnote it
@@ -1508,7 +1508,7 @@ fn scheduler_handles_periodic_unavailable_preimage() {
 		Preimage::request(&hash);
 
 		// Does not ever execute again.
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log().len(), 1);
 
 		// The preimage is not requested anymore.
@@ -1536,7 +1536,7 @@ fn scheduler_respects_priority_ordering() {
 			root(),
 			Preimage::bound(call).unwrap(),
 		));
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]);
 	});
 }
@@ -1571,10 +1571,10 @@ fn scheduler_respects_priority_ordering_with_soft_deadlines() {
 		));
 
 		// 2600 does not fit with 69 or 42, but has higher priority, so will go through
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 2600u32)]);
 		// 69 and 42 fit together
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]);
 	});
 }
@@ -1701,14 +1701,14 @@ fn root_calls_works() {
 			Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 4, None, 127, call,)
 		);
 		assert_ok!(Scheduler::schedule(RuntimeOrigin::root(), 4, None, 127, call2));
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Scheduled calls are in the agenda.
 		assert_eq!(Agenda::<Test>::get(4).len(), 2);
 		assert!(logger::log().is_empty());
 		assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), [1u8; 32]));
 		assert_ok!(Scheduler::cancel(RuntimeOrigin::root(), 4, 1));
 		// Scheduled calls are made NONE, so should not effect state
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert!(logger::log().is_empty());
 	});
 }
@@ -1716,7 +1716,7 @@ fn root_calls_works() {
 #[test]
 fn fails_to_schedule_task_in_the_past() {
 	new_test_ext().execute_with(|| {
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 
 		let call1 = Box::new(RuntimeCall::Logger(LoggerCall::log {
 			i: 69,
@@ -1768,14 +1768,14 @@ fn should_use_origin() {
 			call,
 		));
 		assert_ok!(Scheduler::schedule(system::RawOrigin::Signed(1).into(), 4, None, 127, call2,));
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Scheduled calls are in the agenda.
 		assert_eq!(Agenda::<Test>::get(4).len(), 2);
 		assert!(logger::log().is_empty());
 		assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), [1u8; 32]));
 		assert_ok!(Scheduler::cancel(system::RawOrigin::Signed(1).into(), 4, 1));
 		// Scheduled calls are made NONE, so should not effect state
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert!(logger::log().is_empty());
 	});
 }
@@ -1829,7 +1829,7 @@ fn should_check_origin_for_cancel() {
 			call,
 		));
 		assert_ok!(Scheduler::schedule(system::RawOrigin::Signed(1).into(), 4, None, 127, call2,));
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Scheduled calls are in the agenda.
 		assert_eq!(Agenda::<Test>::get(4).len(), 2);
 		assert!(logger::log().is_empty());
@@ -1840,7 +1840,7 @@ fn should_check_origin_for_cancel() {
 		assert_noop!(Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), BadOrigin);
 		assert_noop!(Scheduler::cancel_named(system::RawOrigin::Root.into(), [1u8; 32]), BadOrigin);
 		assert_noop!(Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), BadOrigin);
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_eq!(
 			logger::log(),
 			vec![
@@ -1888,17 +1888,17 @@ fn cancel_removes_retry_entry() {
 		// task 42 will be retried 10 times every 3 blocks
 		assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 1));
 		assert_eq!(Retries::<Test>::iter().count(), 2);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert_eq!(Agenda::<Test>::get(4).len(), 2);
 		// both tasks fail
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(Agenda::<Test>::get(4).is_empty());
 		// 42 and 20 are rescheduled for next block
 		assert_eq!(Agenda::<Test>::get(5).len(), 2);
 		assert!(logger::log().is_empty());
 		// 42 and 20 still fail
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		// 42 and 20 rescheduled for next block
 		assert_eq!(Agenda::<Test>::get(6).len(), 2);
 		assert_eq!(Retries::<Test>::iter().count(), 2);
@@ -1909,7 +1909,7 @@ fn cancel_removes_retry_entry() {
 		assert!(Scheduler::cancel(root().into(), 6, 0).is_ok());
 
 		// 20 is removed, 42 still fails
-		run_to_block(6);
+		System::run_to_block::<AllPalletsWithSystem>(6);
 		// 42 rescheduled for next block
 		assert_eq!(Agenda::<Test>::get(7).len(), 1);
 		// 20's retry entry is removed
@@ -1920,7 +1920,7 @@ fn cancel_removes_retry_entry() {
 		assert!(Scheduler::cancel(root().into(), 7, 0).is_ok());
 
 		// both tasks are canceled, everything is removed now
-		run_to_block(7);
+		System::run_to_block::<AllPalletsWithSystem>(7);
 		assert!(Agenda::<Test>::get(8).is_empty());
 		assert_eq!(Retries::<Test>::iter().count(), 0);
 	});
@@ -1963,7 +1963,7 @@ fn cancel_retries_works() {
 		// task 42 will be retried 10 times every 3 blocks
 		assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 1));
 		assert_eq!(Retries::<Test>::iter().count(), 2);
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		assert!(logger::log().is_empty());
 		assert_eq!(Agenda::<Test>::get(4).len(), 2);
 		// cancel the retry config for 20
@@ -1972,7 +1972,7 @@ fn cancel_retries_works() {
 		// cancel the retry config for 42
 		assert_ok!(Scheduler::cancel_retry_named(root().into(), [1u8; 32]));
 		assert_eq!(Retries::<Test>::iter().count(), 0);
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// both tasks failed and there are no more retries, so they are evicted
 		assert_eq!(Agenda::<Test>::get(4).len(), 0);
 		assert_eq!(Retries::<Test>::iter().count(), 0);
@@ -2287,7 +2287,7 @@ fn postponed_named_task_cannot_be_rescheduled() {
 		assert!(Lookup::<Test>::contains_key(name));
 
 		// Run to a very large block.
-		run_to_block(10);
+		System::run_to_block::<AllPalletsWithSystem>(10);
 
 		// It was not executed.
 		assert!(logger::log().is_empty());
@@ -2321,7 +2321,7 @@ fn postponed_named_task_cannot_be_rescheduled() {
 		// Finally add the preimage.
 		assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode()));
 
-		run_to_block(1000);
+		System::run_to_block::<AllPalletsWithSystem>(1000);
 		// It did not execute.
 		assert!(logger::log().is_empty());
 		assert!(!Preimage::is_requested(&hash));
@@ -2357,14 +2357,14 @@ fn scheduler_v3_anon_basic_works() {
 		)
 		.unwrap();
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Did not execute till block 3.
 		assert!(logger::log().is_empty());
 		// Executes in block 4.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// ... but not again.
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -2389,7 +2389,7 @@ fn scheduler_v3_anon_cancel_works() {
 		// Cancel the call.
 		assert_ok!(<Scheduler as Anon<_, _, _>>::cancel(address));
 		// It did not get executed.
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert!(logger::log().is_empty());
 		// Cannot cancel again.
 		assert_err!(<Scheduler as Anon<_, _, _>>::cancel(address), DispatchError::Unavailable);
@@ -2413,7 +2413,7 @@ fn scheduler_v3_anon_reschedule_works() {
 		)
 		.unwrap();
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Did not execute till block 3.
 		assert!(logger::log().is_empty());
 
@@ -2430,9 +2430,9 @@ fn scheduler_v3_anon_reschedule_works() {
 		// Re-schedule to block 5.
 		assert_ok!(<Scheduler as Anon<_, _, _>>::reschedule(address, DispatchTime::At(5)));
 		// Scheduled for block 5.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(logger::log().is_empty());
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		// Does execute in block 5.
 		assert_eq!(logger::log(), vec![(root(), 42)]);
 		// Cannot re-schedule executed task.
@@ -2461,14 +2461,14 @@ fn scheduler_v3_anon_next_schedule_time_works() {
 		)
 		.unwrap();
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Did not execute till block 3.
 		assert!(logger::log().is_empty());
 
 		// Scheduled for block 4.
 		assert_eq!(<Scheduler as Anon<_, _, _>>::next_dispatch_time(address), Ok(4));
 		// Block 4 executes it.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 42)]);
 
 		// It has no dispatch time anymore.
@@ -2498,7 +2498,7 @@ fn scheduler_v3_anon_reschedule_and_next_schedule_time_work() {
 		)
 		.unwrap();
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Did not execute till block 3.
 		assert!(logger::log().is_empty());
 
@@ -2512,10 +2512,10 @@ fn scheduler_v3_anon_reschedule_and_next_schedule_time_work() {
 		assert_eq!(<Scheduler as Anon<_, _, _>>::next_dispatch_time(address), Ok(5));
 
 		// Block 4 does nothing.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(logger::log().is_empty());
 		// Block 5 executes it.
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		assert_eq!(logger::log(), vec![(root(), 42)]);
 	});
 }
@@ -2548,7 +2548,7 @@ fn scheduler_v3_anon_schedule_agenda_overflows() {
 			DispatchError::Exhausted
 		);
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// All scheduled calls are executed.
 		assert_eq!(logger::log().len() as u32, max);
 	});
@@ -2597,7 +2597,7 @@ fn scheduler_v3_anon_cancel_and_schedule_fills_holes() {
 			assert_eq!(i, index);
 		}
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// Maximum number of calls are executed.
 		assert_eq!(logger::log().len() as u32, max);
 	});
@@ -2643,7 +2643,7 @@ fn scheduler_v3_anon_reschedule_fills_holes() {
 			assert_eq!(new, want);
 		}
 
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		// Maximum number of calls are executed.
 		assert_eq!(logger::log().len() as u32, max);
 	});
@@ -2670,14 +2670,14 @@ fn scheduler_v3_named_basic_works() {
 		)
 		.unwrap();
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Did not execute till block 3.
 		assert!(logger::log().is_empty());
 		// Executes in block 4.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 		// ... but not again.
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert_eq!(logger::log(), vec![(root(), 42u32)]);
 	});
 }
@@ -2705,7 +2705,7 @@ fn scheduler_v3_named_cancel_named_works() {
 		// Cancel the call by name.
 		assert_ok!(<Scheduler as Named<_, _, _>>::cancel_named(name));
 		// It did not get executed.
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert!(logger::log().is_empty());
 		// Cannot cancel again.
 		assert_noop!(<Scheduler as Named<_, _, _>>::cancel_named(name), DispatchError::Unavailable);
@@ -2735,7 +2735,7 @@ fn scheduler_v3_named_cancel_without_name_works() {
 		// Cancel the call by address.
 		assert_ok!(<Scheduler as Anon<_, _, _>>::cancel(address));
 		// It did not get executed.
-		run_to_block(100);
+		System::run_to_block::<AllPalletsWithSystem>(100);
 		assert!(logger::log().is_empty());
 		// Cannot cancel again.
 		assert_err!(<Scheduler as Anon<_, _, _>>::cancel(address), DispatchError::Unavailable);
@@ -2762,7 +2762,7 @@ fn scheduler_v3_named_reschedule_named_works() {
 		)
 		.unwrap();
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Did not execute till block 3.
 		assert!(logger::log().is_empty());
 
@@ -2784,9 +2784,9 @@ fn scheduler_v3_named_reschedule_named_works() {
 		// Re-schedule to block 5.
 		assert_ok!(<Scheduler as Named<_, _, _>>::reschedule_named(name, DispatchTime::At(5)));
 		// Scheduled for block 5.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert!(logger::log().is_empty());
-		run_to_block(5);
+		System::run_to_block::<AllPalletsWithSystem>(5);
 		// Does execute in block 5.
 		assert_eq!(logger::log(), vec![(root(), 42)]);
 		// Cannot re-schedule executed task.
@@ -2822,7 +2822,7 @@ fn scheduler_v3_named_next_schedule_time_works() {
 		)
 		.unwrap();
 
-		run_to_block(3);
+		System::run_to_block::<AllPalletsWithSystem>(3);
 		// Did not execute till block 3.
 		assert!(logger::log().is_empty());
 
@@ -2831,7 +2831,7 @@ fn scheduler_v3_named_next_schedule_time_works() {
 		// Also works by address.
 		assert_eq!(<Scheduler as Anon<_, _, _>>::next_dispatch_time(address), Ok(4));
 		// Block 4 executes it.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 		assert_eq!(logger::log(), vec![(root(), 42)]);
 
 		// It has no dispatch time anymore.
@@ -3025,7 +3025,7 @@ fn unavailable_call_is_detected() {
 		assert!(Preimage::is_requested(&hash));
 
 		// Executes in block 4.
-		run_to_block(4);
+		System::run_to_block::<AllPalletsWithSystem>(4);
 
 		assert_eq!(
 			System::events().last().unwrap().event,
diff --git a/substrate/frame/society/src/mock.rs b/substrate/frame/society/src/mock.rs
index 3c27c08a106..8cb5dc82375 100644
--- a/substrate/frame/society/src/mock.rs
+++ b/substrate/frame/society/src/mock.rs
@@ -138,18 +138,6 @@ impl EnvBuilder {
 	}
 }
 
-/// Run until a particular block.
-pub fn run_to_block(n: u64) {
-	while System::block_number() < n {
-		if System::block_number() > 1 {
-			System::on_finalize(System::block_number());
-		}
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		Society::on_initialize(System::block_number());
-	}
-}
-
 /// Creates a bid struct using input parameters.
 pub fn bid<AccountId, Balance>(
 	who: AccountId,
@@ -173,12 +161,12 @@ pub fn candidacy<AccountId, Balance>(
 pub fn next_challenge() {
 	let challenge_period: u64 = <Test as Config>::ChallengePeriod::get();
 	let now = System::block_number();
-	run_to_block(now + challenge_period - now % challenge_period);
+	System::run_to_block::<AllPalletsWithSystem>(now + challenge_period - now % challenge_period);
 }
 
 pub fn next_voting() {
 	if let Period::Voting { more, .. } = Society::period() {
-		run_to_block(System::block_number() + more);
+		System::run_to_block::<AllPalletsWithSystem>(System::block_number() + more);
 	}
 }
 
@@ -235,8 +223,11 @@ pub fn conclude_intake(allow_resignation: bool, judge_intake: Option<bool>) {
 pub fn next_intake() {
 	let claim_period: u64 = <Test as Config>::ClaimPeriod::get();
 	match Society::period() {
-		Period::Voting { more, .. } => run_to_block(System::block_number() + more + claim_period),
-		Period::Claim { more, .. } => run_to_block(System::block_number() + more),
+		Period::Voting { more, .. } => System::run_to_block::<AllPalletsWithSystem>(
+			System::block_number() + more + claim_period,
+		),
+		Period::Claim { more, .. } =>
+			System::run_to_block::<AllPalletsWithSystem>(System::block_number() + more),
 	}
 }
 
diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs
index 2a13f99855b..22832f18b6f 100644
--- a/substrate/frame/society/src/tests.rs
+++ b/substrate/frame/society/src/tests.rs
@@ -272,7 +272,7 @@ fn bidding_works() {
 		// 40, now a member, can vote for 50
 		assert_ok!(Society::vote(Origin::signed(40), 50, true));
 		conclude_intake(true, None);
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		// 50 is now a member
 		assert_eq!(members(), vec![10, 30, 40, 50]);
 		// Pot is increased by 1000, and 500 is paid out. Total payout so far is 1200.
@@ -282,7 +282,7 @@ fn bidding_works() {
 		assert_eq!(candidacies(), vec![]);
 		assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around
 																// Next period
-		run_to_block(16);
+		System::run_to_block::<AllPalletsWithSystem>(16);
 		// Same members
 		assert_eq!(members(), vec![10, 30, 40, 50]);
 		// Pot is increased by 1000 again
@@ -294,7 +294,7 @@ fn bidding_works() {
 		// Candidate 60 is voted in.
 		assert_ok!(Society::vote(Origin::signed(50), 60, true));
 		conclude_intake(true, None);
-		run_to_block(20);
+		System::run_to_block::<AllPalletsWithSystem>(20);
 		// 60 joins as a member
 		assert_eq!(members(), vec![10, 30, 40, 50, 60]);
 		// Pay them
@@ -368,7 +368,7 @@ fn rejecting_skeptic_on_approved_is_punished() {
 		}
 		conclude_intake(true, None);
 		assert_eq!(Members::<Test>::get(10).unwrap().strikes, 0);
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		assert_eq!(members(), vec![10, 20, 30, 40]);
 		assert_eq!(Members::<Test>::get(skeptic).unwrap().strikes, 1);
 	});
@@ -418,7 +418,7 @@ fn slash_payout_works() {
 			Payouts::<Test>::get(20),
 			PayoutRecord { paid: 0, payouts: vec![(8, 500)].try_into().unwrap() }
 		);
-		run_to_block(8);
+		System::run_to_block::<AllPalletsWithSystem>(8);
 		// payout should be here, but 500 less
 		assert_ok!(Society::payout(RuntimeOrigin::signed(20)));
 		assert_eq!(Balances::free_balance(20), 550);
@@ -1315,7 +1315,7 @@ fn drop_candidate_works() {
 		assert_ok!(Society::vote(Origin::signed(10), 40, false));
 		assert_ok!(Society::vote(Origin::signed(20), 40, false));
 		assert_ok!(Society::vote(Origin::signed(30), 40, false));
-		run_to_block(12);
+		System::run_to_block::<AllPalletsWithSystem>(12);
 		assert_ok!(Society::drop_candidate(Origin::signed(50), 40));
 		// 40 candidacy has gone.
 		assert_eq!(candidates(), vec![]);
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index f79a52bc6c5..e3e58fc01b5 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -325,7 +325,7 @@ pub mod testing_prelude {
 		assert_storage_noop, hypothetically, storage_alias,
 	};
 
-	pub use frame_system::{self, mocking::*};
+	pub use frame_system::{self, mocking::*, RunToBlockHooks};
 
 	#[deprecated(note = "Use `frame::testing_prelude::TestState` instead.")]
 	pub use sp_io::TestExternalities;
diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs
index df8cb38e8b3..769b84826b4 100644
--- a/substrate/frame/staking/src/mock.rs
+++ b/substrate/frame/staking/src/mock.rs
@@ -25,7 +25,7 @@ use frame_election_provider_support::{
 use frame_support::{
 	assert_ok, derive_impl, ord_parameter_types, parameter_types,
 	traits::{
-		ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, LockableCurrency,
+		ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Imbalance, LockableCurrency,
 		OnUnbalanced, OneSessionHandler, WithdrawReasons,
 	},
 	weights::constants::RocksDbWeight,
@@ -155,7 +155,7 @@ impl pallet_session::historical::Config for Test {
 }
 impl pallet_authorship::Config for Test {
 	type FindAuthor = Author11;
-	type EventHandler = Pallet<Test>;
+	type EventHandler = ();
 }
 
 impl pallet_timestamp::Config for Test {
@@ -544,13 +544,10 @@ impl ExtBuilder {
 		let mut ext = sp_io::TestExternalities::from(storage);
 
 		if self.initialize_first_session {
-			// We consider all test to start after timestamp is initialized This must be ensured by
-			// having `timestamp::on_initialize` called before `staking::on_initialize`. Also, if
-			// session length is 1, then it is already triggered.
 			ext.execute_with(|| {
-				System::set_block_number(1);
-				Session::on_initialize(1);
-				<Staking as Hooks<u64>>::on_initialize(1);
+				run_to_block(1);
+
+				// Force reset the timestamp to the initial timestamp for easy testing.
 				Timestamp::set_timestamp(INIT_TIMESTAMP);
 			});
 		}
@@ -618,33 +615,31 @@ pub(crate) fn bond_virtual_nominator(
 /// a block import/propose process where we first initialize the block, then execute some stuff (not
 /// in the function), and then finalize the block.
 pub(crate) fn run_to_block(n: BlockNumber) {
-	Staking::on_finalize(System::block_number());
-	for b in (System::block_number() + 1)..=n {
-		System::set_block_number(b);
-		Session::on_initialize(b);
-		<Staking as Hooks<u64>>::on_initialize(b);
-		Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP);
-		if b != n {
-			Staking::on_finalize(System::block_number());
-		}
-	}
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().after_initialize(|bn| {
+			Timestamp::set_timestamp(bn * BLOCK_TIME + INIT_TIMESTAMP);
+		}),
+	);
 }
 
 /// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`.
-pub(crate) fn start_session(session_index: SessionIndex) {
+pub(crate) fn start_session(end_session_idx: SessionIndex) {
+	let period = Period::get();
 	let end: u64 = if Offset::get().is_zero() {
-		(session_index as u64) * Period::get()
+		(end_session_idx as u64) * period
 	} else {
-		Offset::get() + (session_index.saturating_sub(1) as u64) * Period::get()
+		Offset::get() + (end_session_idx.saturating_sub(1) as u64) * period
 	};
+
 	run_to_block(end);
+
+	let curr_session_idx = Session::current_index();
+
 	// session must have progressed properly.
 	assert_eq!(
-		Session::current_index(),
-		session_index,
-		"current session index = {}, expected = {}",
-		Session::current_index(),
-		session_index,
+		curr_session_idx, end_session_idx,
+		"current session index = {curr_session_idx}, expected = {end_session_idx}",
 	);
 }
 
diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs
index 61323b70b33..1dc1a3928f2 100644
--- a/substrate/frame/state-trie-migration/src/lib.rs
+++ b/substrate/frame/state-trie-migration/src/lib.rs
@@ -1309,16 +1309,17 @@ mod mock {
 	pub(crate) fn run_to_block(n: u32) -> (H256, Weight) {
 		let mut root = Default::default();
 		let mut weight_sum = Weight::zero();
+
 		log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n);
-		while System::block_number() < n {
-			System::set_block_number(System::block_number() + 1);
-			System::on_initialize(System::block_number());
 
-			weight_sum += StateTrieMigration::on_initialize(System::block_number());
+		System::run_to_block_with::<AllPalletsWithSystem>(
+			n,
+			frame_system::RunToBlockHooks::default().after_initialize(|bn| {
+				weight_sum += StateTrieMigration::on_initialize(bn);
+				root = *System::finalize().state_root();
+			}),
+		);
 
-			root = *System::finalize().state_root();
-			System::on_finalize(System::block_number());
-		}
 		(root, weight_sum)
 	}
 }
diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs
index 894e1898ed1..f2bb5e290c9 100644
--- a/substrate/frame/system/src/lib.rs
+++ b/substrate/frame/system/src/lib.rs
@@ -1974,6 +1974,51 @@ impl<T: Config> Pallet<T> {
 			.collect::<_>()
 	}
 
+	/// Simulate the execution of a block sequence up to a specified height, injecting the
+	/// provided hooks at each block.
+	///
+	/// `on_finalize` is always called before `on_initialize` with the current block number.
+	/// `on_initialize` is always called with the next block number.
+	///
+	/// These hooks allow custom logic to be executed at specific locations in each block.
+	/// For example, you might use one of them to set a timestamp for each block.
+	#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+	pub fn run_to_block_with<AllPalletsWithSystem>(
+		n: BlockNumberFor<T>,
+		mut hooks: RunToBlockHooks<T>,
+	) where
+		AllPalletsWithSystem: frame_support::traits::OnInitialize<BlockNumberFor<T>>
+			+ frame_support::traits::OnFinalize<BlockNumberFor<T>>,
+	{
+		let mut bn = Self::block_number();
+
+		while bn < n {
+			// Skip block 0.
+			if !bn.is_zero() {
+				(hooks.before_finalize)(bn);
+				AllPalletsWithSystem::on_finalize(bn);
+				(hooks.after_finalize)(bn);
+			}
+
+			bn += One::one();
+
+			Self::set_block_number(bn);
+			(hooks.before_initialize)(bn);
+			AllPalletsWithSystem::on_initialize(bn);
+			(hooks.after_initialize)(bn);
+		}
+	}
+
+	/// Simulate the execution of a block sequence up to a specified height.
+	#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+	pub fn run_to_block<AllPalletsWithSystem>(n: BlockNumberFor<T>)
+	where
+		AllPalletsWithSystem: frame_support::traits::OnInitialize<BlockNumberFor<T>>
+			+ frame_support::traits::OnFinalize<BlockNumberFor<T>>,
+	{
+		Self::run_to_block_with::<AllPalletsWithSystem>(n, Default::default());
+	}
+
 	/// Set the block number to something in particular. Can be used as an alternative to
 	/// `initialize` for tests that don't need to bother with the other environment entries.
 	#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
@@ -2347,6 +2392,72 @@ impl<T: Config> Lookup for ChainContext<T> {
 	}
 }
 
+/// Hooks for the [`Pallet::run_to_block_with`] function.
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+pub struct RunToBlockHooks<'a, T>
+where
+	T: 'a + Config,
+{
+	before_initialize: Box<dyn 'a + FnMut(BlockNumberFor<T>)>,
+	after_initialize: Box<dyn 'a + FnMut(BlockNumberFor<T>)>,
+	before_finalize: Box<dyn 'a + FnMut(BlockNumberFor<T>)>,
+	after_finalize: Box<dyn 'a + FnMut(BlockNumberFor<T>)>,
+}
+
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+impl<'a, T> RunToBlockHooks<'a, T>
+where
+	T: 'a + Config,
+{
+	/// Set the hook function logic before the initialization of the block.
+	pub fn before_initialize<F>(mut self, f: F) -> Self
+	where
+		F: 'a + FnMut(BlockNumberFor<T>),
+	{
+		self.before_initialize = Box::new(f);
+		self
+	}
+	/// Set the hook function logic after the initialization of the block.
+	pub fn after_initialize<F>(mut self, f: F) -> Self
+	where
+		F: 'a + FnMut(BlockNumberFor<T>),
+	{
+		self.after_initialize = Box::new(f);
+		self
+	}
+	/// Set the hook function logic before the finalization of the block.
+	pub fn before_finalize<F>(mut self, f: F) -> Self
+	where
+		F: 'a + FnMut(BlockNumberFor<T>),
+	{
+		self.before_finalize = Box::new(f);
+		self
+	}
+	/// Set the hook function logic after the finalization of the block.
+	pub fn after_finalize<F>(mut self, f: F) -> Self
+	where
+		F: 'a + FnMut(BlockNumberFor<T>),
+	{
+		self.after_finalize = Box::new(f);
+		self
+	}
+}
+
+#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
+impl<'a, T> Default for RunToBlockHooks<'a, T>
+where
+	T: Config,
+{
+	fn default() -> Self {
+		Self {
+			before_initialize: Box::new(|_| {}),
+			after_initialize: Box::new(|_| {}),
+			before_finalize: Box::new(|_| {}),
+			after_finalize: Box::new(|_| {}),
+		}
+	}
+}
+
 /// Prelude to be used alongside pallet macro, for ease of use.
 pub mod pallet_prelude {
 	pub use crate::{ensure_none, ensure_root, ensure_signed, ensure_signed_or_root};
diff --git a/substrate/frame/transaction-storage/src/mock.rs b/substrate/frame/transaction-storage/src/mock.rs
index 73174b73dba..84a77043d57 100644
--- a/substrate/frame/transaction-storage/src/mock.rs
+++ b/substrate/frame/transaction-storage/src/mock.rs
@@ -21,10 +21,7 @@ use crate::{
 	self as pallet_transaction_storage, TransactionStorageProof, DEFAULT_MAX_BLOCK_TRANSACTIONS,
 	DEFAULT_MAX_TRANSACTION_SIZE,
 };
-use frame_support::{
-	derive_impl,
-	traits::{ConstU32, OnFinalize, OnInitialize},
-};
+use frame_support::{derive_impl, traits::ConstU32};
 use sp_runtime::{traits::IdentityLookup, BuildStorage};
 
 pub type Block = frame_system::mocking::MockBlock<Test>;
@@ -80,15 +77,13 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	t.into()
 }
 
-pub fn run_to_block(n: u64, f: impl Fn() -> Option<TransactionStorageProof>) {
-	while System::block_number() < n {
-		if let Some(proof) = f() {
-			TransactionStorage::check_proof(RuntimeOrigin::none(), proof).unwrap();
-		}
-		TransactionStorage::on_finalize(System::block_number());
-		System::on_finalize(System::block_number());
-		System::set_block_number(System::block_number() + 1);
-		System::on_initialize(System::block_number());
-		TransactionStorage::on_initialize(System::block_number());
-	}
+pub fn run_to_block(n: u64, f: impl Fn() -> Option<TransactionStorageProof> + 'static) {
+	System::run_to_block_with::<AllPalletsWithSystem>(
+		n,
+		frame_system::RunToBlockHooks::default().before_finalize(|_| {
+			if let Some(proof) = f() {
+				TransactionStorage::check_proof(RuntimeOrigin::none(), proof).unwrap();
+			}
+		}),
+	);
 }
-- 
GitLab


From ef064a357c97c2635f05295aac1698a91fa2f4fd Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 15 Jan 2025 13:04:37 +0200
Subject: [PATCH 059/116] req-resp/litep2p: Reject inbound requests from banned
 peers (#7158)

This PR rejects inbound requests from banned peers (reputation is below
the banned threshold).

This mirrors the request-response implementation from the libp2p side.
I don't expect this to get triggered too often, but we'll monitor this
metric.

While at it, a new inbound failure metric has been registered to gain
visibility into this.

Discovered during the investigation of:
https://github.com/paritytech/polkadot-sdk/issues/7076#issuecomment-2589613046

cc @paritytech/networking

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
---
 prdoc/pr_7158.prdoc                           | 12 +++++++++
 .../src/litep2p/shim/request_response/mod.rs  | 25 ++++++++++++++-----
 2 files changed, 31 insertions(+), 6 deletions(-)
 create mode 100644 prdoc/pr_7158.prdoc

diff --git a/prdoc/pr_7158.prdoc b/prdoc/pr_7158.prdoc
new file mode 100644
index 00000000000..e113a7fdcd1
--- /dev/null
+++ b/prdoc/pr_7158.prdoc
@@ -0,0 +1,12 @@
+title: Reject litep2p inbound requests from banned peers
+
+doc:
+  - audience: Node Dev
+    description: |
+      This PR rejects inbound requests from banned peers (reputation is below the banned threshold).
+      This mirrors the request-response implementation from the libp2p side.
+      While at it, have registered a new inbound failure metric to have visibility into this.
+
+crates:
+- name: sc-network
+  bump: patch
diff --git a/substrate/client/network/src/litep2p/shim/request_response/mod.rs b/substrate/client/network/src/litep2p/shim/request_response/mod.rs
index 146f2e4add9..690d5a31e6a 100644
--- a/substrate/client/network/src/litep2p/shim/request_response/mod.rs
+++ b/substrate/client/network/src/litep2p/shim/request_response/mod.rs
@@ -273,6 +273,13 @@ impl RequestResponseProtocol {
 		request_id: RequestId,
 		request: Vec<u8>,
 	) {
+		log::trace!(
+			target: LOG_TARGET,
+			"{}: request received from {peer:?} ({fallback:?} {request_id:?}), request size {:?}",
+			self.protocol,
+			request.len(),
+		);
+
 		let Some(inbound_queue) = &self.inbound_queue else {
 			log::trace!(
 				target: LOG_TARGET,
@@ -284,12 +291,18 @@ impl RequestResponseProtocol {
 			return;
 		};
 
-		log::trace!(
-			target: LOG_TARGET,
-			"{}: request received from {peer:?} ({fallback:?} {request_id:?}), request size {:?}",
-			self.protocol,
-			request.len(),
-		);
+		if self.peerstore_handle.is_banned(&peer.into()) {
+			log::trace!(
+				target: LOG_TARGET,
+				"{}: rejecting inbound request from banned {peer:?} ({request_id:?})",
+				self.protocol,
+			);
+
+			self.handle.reject_request(request_id);
+			self.metrics.register_inbound_request_failure("banned-peer");
+			return;
+		}
+
 		let (tx, rx) = oneshot::channel();
 
 		match inbound_queue.try_send(IncomingRequest {
-- 
GitLab


From 88f898e74423ab32806f44c77c925b0081efa2cc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Michael=20M=C3=BCller?= <mich@elmueller.net>
Date: Wed, 15 Jan 2025 14:14:00 +0100
Subject: [PATCH 060/116] [pallet-revive] Fix `caller_is_root` return value
 (#7086)

Closes https://github.com/paritytech/polkadot-sdk/issues/6767.

The return type of the host function `caller_is_root` was denoted as
`u32` in `pallet_revive_uapi`. This PR fixes the return type to `bool`.

As a drive-by, the PR re-exports `pallet_revive::exec::Origin` to extend
what can be tested externally.

---------

Co-authored-by: Cyrill Leutwiler <bigcyrill@hotmail.com>
---
 prdoc/pr_7086.prdoc                             | 11 +++++++++++
 substrate/frame/revive/src/exec.rs              |  2 +-
 substrate/frame/revive/src/gas.rs               |  2 +-
 substrate/frame/revive/src/lib.rs               |  4 ++--
 substrate/frame/revive/uapi/src/host.rs         |  2 +-
 substrate/frame/revive/uapi/src/host/riscv64.rs |  5 +++--
 6 files changed, 19 insertions(+), 7 deletions(-)
 create mode 100644 prdoc/pr_7086.prdoc

diff --git a/prdoc/pr_7086.prdoc b/prdoc/pr_7086.prdoc
new file mode 100644
index 00000000000..55fed9bca3e
--- /dev/null
+++ b/prdoc/pr_7086.prdoc
@@ -0,0 +1,11 @@
+title: '[pallet-revive] Fix `caller_is_root` return value'
+doc:
+- audience: Runtime Dev
+  description: The return type of the host function `caller_is_root` was denoted as `u32`
+    in `pallet_revive_uapi`. This PR fixes the return type to `bool`. As a drive-by, the
+    PR re-exports `pallet_revive::exec::Origin` to extend what can be tested externally.
+crates:
+- name: pallet-revive
+  bump: minor
+- name: pallet-revive-uapi
+  bump: major
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index a6a25914976..478e96dc994 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -325,7 +325,7 @@ pub trait Ext: sealing::Sealed {
 	/// Returns `Err(InvalidImmutableAccess)` if called from a constructor.
 	fn get_immutable_data(&mut self) -> Result<ImmutableData, DispatchError>;
 
-	/// Set the the immutable data of the current contract.
+	/// Set the immutable data of the current contract.
 	///
 	/// Returns `Err(InvalidImmutableAccess)` if not called from a constructor.
 	///
diff --git a/substrate/frame/revive/src/gas.rs b/substrate/frame/revive/src/gas.rs
index 9aad84e6920..5c30a0a5100 100644
--- a/substrate/frame/revive/src/gas.rs
+++ b/substrate/frame/revive/src/gas.rs
@@ -89,7 +89,7 @@ pub struct RefTimeLeft(u64);
 
 /// Resource that needs to be synced to the executor.
 ///
-/// Wrapped to make sure that the resource will be synced back the the executor.
+/// Wrapped to make sure that the resource will be synced back to the executor.
 #[must_use]
 pub struct Syncable(polkavm::Gas);
 
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index 04bce264a18..bdb4b92edd9 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -45,7 +45,7 @@ use crate::{
 		runtime::{gas_from_fee, GAS_PRICE},
 		GasEncoder, GenericTransaction,
 	},
-	exec::{AccountIdOf, ExecError, Executable, Ext, Key, Origin, Stack as ExecStack},
+	exec::{AccountIdOf, ExecError, Executable, Ext, Key, Stack as ExecStack},
 	gas::GasMeter,
 	storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager},
 	wasm::{CodeInfo, RuntimeCosts, WasmBlob},
@@ -84,7 +84,7 @@ use sp_runtime::{
 pub use crate::{
 	address::{create1, create2, AccountId32Mapper, AddressMapper},
 	debug::Tracing,
-	exec::MomentOf,
+	exec::{MomentOf, Origin},
 	pallet::*,
 };
 pub use primitives::*;
diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs
index eced4843b55..d90c0f45205 100644
--- a/substrate/frame/revive/uapi/src/host.rs
+++ b/substrate/frame/revive/uapi/src/host.rs
@@ -488,7 +488,7 @@ pub trait HostFn: private::Sealed {
 	/// A return value of `true` indicates that this contract is being called by a root origin,
 	/// and `false` indicates that the caller is a signed origin.
 	#[unstable_hostfn]
-	fn caller_is_root() -> u32;
+	fn caller_is_root() -> bool;
 
 	/// Clear the value at the given key in the contract storage.
 	///
diff --git a/substrate/frame/revive/uapi/src/host/riscv64.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs
index 6fdda86892d..c83be942a97 100644
--- a/substrate/frame/revive/uapi/src/host/riscv64.rs
+++ b/substrate/frame/revive/uapi/src/host/riscv64.rs
@@ -501,8 +501,9 @@ impl HostFn for HostFnImpl {
 	}
 
 	#[unstable_hostfn]
-	fn caller_is_root() -> u32 {
-		unsafe { sys::caller_is_root() }.into_u32()
+	fn caller_is_root() -> bool {
+		let ret_val = unsafe { sys::caller_is_root() };
+		ret_val.into_bool()
 	}
 
 	#[unstable_hostfn]
-- 
GitLab


From cb0d8544dc8828c7b5e7f6a5fc20ce8c6ef9bbb4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexandre=20R=2E=20Bald=C3=A9?= <alexandre.balde@parity.io>
Date: Wed, 15 Jan 2025 13:14:54 +0000
Subject: [PATCH 061/116] Remove 0 as a special case in gas/storage meters
 (#6890)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Closes #6846 .

---------

Signed-off-by: xermicus <cyrill@parity.io>
Co-authored-by: command-bot <>
Co-authored-by: Alexander Theißen <alex.theissen@me.com>
Co-authored-by: xermicus <cyrill@parity.io>
---
 .../people-westend/src/tests/governance.rs    |   2 +-
 prdoc/pr_6890.prdoc                           |  19 ++++
 .../frame/revive/fixtures/contracts/call.rs   |   8 +-
 .../contracts/call_diverging_out_len.rs       |  12 +--
 .../fixtures/contracts/call_return_code.rs    |   8 +-
 .../contracts/call_runtime_and_call.rs        |   8 +-
 .../contracts/call_with_flags_and_value.rs    |   8 +-
 .../fixtures/contracts/call_with_limit.rs     |   4 +-
 .../fixtures/contracts/caller_contract.rs     |  48 +++++-----
 .../contracts/chain_extension_temp_storage.rs |   8 +-
 .../fixtures/contracts/create1_with_value.rs  |  12 ++-
 .../contracts/create_storage_and_call.rs      |   8 +-
 .../create_storage_and_instantiate.rs         |   6 +-
 .../create_transient_storage_and_call.rs      |   8 +-
 .../fixtures/contracts/delegate_call.rs       |  10 +-
 .../contracts/delegate_call_deposit_limit.rs  |  10 +-
 .../contracts/delegate_call_simple.rs         |  10 +-
 .../contracts/destroy_and_transfer.rs         |  18 ++--
 .../frame/revive/fixtures/contracts/drain.rs  |   2 +-
 .../contracts/instantiate_return_code.rs      |   7 +-
 .../contracts/locking_delegate_dependency.rs  |  10 +-
 .../frame/revive/fixtures/contracts/origin.rs |   6 +-
 .../fixtures/contracts/read_only_call.rs      |   8 +-
 .../revive/fixtures/contracts/recurse.rs      |   8 +-
 .../fixtures/contracts/return_data_api.rs     |  24 +++--
 .../fixtures/contracts/self_destruct.rs       |   8 +-
 .../contracts/transfer_return_code.rs         |   2 +-
 substrate/frame/revive/fixtures/src/lib.rs    |   2 +-
 .../rpc/examples/js/pvm/FlipperCaller.polkavm | Bin 4532 -> 4584 bytes
 .../rpc/examples/js/pvm/PiggyBank.polkavm     | Bin 5062 -> 5088 bytes
 .../frame/revive/src/benchmarking/mod.rs      |  14 +--
 substrate/frame/revive/src/exec.rs            |  60 +++++-------
 substrate/frame/revive/src/gas.rs             |  74 +++++++++++----
 substrate/frame/revive/src/primitives.rs      |   2 +-
 substrate/frame/revive/src/storage/meter.rs   |  89 ++++++++++--------
 substrate/frame/revive/src/tests.rs           |  72 +++++++-------
 substrate/frame/revive/src/wasm/runtime.rs    |   6 +-
 substrate/frame/revive/uapi/src/host.rs       |   6 +-
 .../frame/revive/uapi/src/host/riscv64.rs     |  12 +--
 39 files changed, 355 insertions(+), 264 deletions(-)
 create mode 100644 prdoc/pr_6890.prdoc

diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs
index ea438f80552..3b1779e40b6 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/governance.rs
@@ -396,7 +396,7 @@ fn relay_commands_add_remove_username_authority() {
 		);
 	});
 
-	// Now, remove the username authority with another priviledged XCM call.
+	// Now, remove the username authority with another privileged XCM call.
 	Westend::execute_with(|| {
 		type Runtime = <Westend as Chain>::Runtime;
 		type RuntimeCall = <Westend as Chain>::RuntimeCall;
diff --git a/prdoc/pr_6890.prdoc b/prdoc/pr_6890.prdoc
new file mode 100644
index 00000000000..b22a339035d
--- /dev/null
+++ b/prdoc/pr_6890.prdoc
@@ -0,0 +1,19 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Alter semantic meaning of 0 in metering limits of EVM contract calls
+
+doc:
+  - audience: [ Runtime Dev, Runtime User ]
+    description: |
+      A limit of 0, for gas meters and storage meters, no longer has the meaning of unlimited metering.
+
+crates:
+  - name: pallet-revive
+    bump: patch
+  - name: pallet-revive-fixtures
+    bump: patch
+  - name: pallet-revive-uapi
+    bump: patch
+  - name: pallet-revive-eth-rpc
+    bump: patch
diff --git a/substrate/frame/revive/fixtures/contracts/call.rs b/substrate/frame/revive/fixtures/contracts/call.rs
index ee51548879d..7c4c0882c6b 100644
--- a/substrate/frame/revive/fixtures/contracts/call.rs
+++ b/substrate/frame/revive/fixtures/contracts/call.rs
@@ -38,10 +38,10 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::empty(),
 		callee_addr,
-		0u64,       // How much ref_time to devote for the execution. 0 = all.
-		0u64,       // How much proof_size to devote for the execution. 0 = all.
-		None,       // No deposit limit.
-		&[0u8; 32], // Value transferred to the contract.
+		u64::MAX,       // How much ref_time to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
+		&[0u8; 32],     // Value transferred to the contract.
 		callee_input,
 		None,
 	)
diff --git a/substrate/frame/revive/fixtures/contracts/call_diverging_out_len.rs b/substrate/frame/revive/fixtures/contracts/call_diverging_out_len.rs
index 129adde2cec..9a8fe5f5f6c 100644
--- a/substrate/frame/revive/fixtures/contracts/call_diverging_out_len.rs
+++ b/substrate/frame/revive/fixtures/contracts/call_diverging_out_len.rs
@@ -42,9 +42,9 @@ fn assert_call<const N: usize>(callee_address: &[u8; 20], expected_output: [u8;
 	api::call(
 		uapi::CallFlags::ALLOW_REENTRY,
 		callee_address,
-		0u64,
-		0u64,
-		None,
+		u64::MAX,
+		u64::MAX,
+		&[u8::MAX; 32],
 		&[0u8; 32],
 		&[],
 		Some(output_buf_capped),
@@ -67,9 +67,9 @@ fn assert_instantiate<const N: usize>(expected_output: [u8; BUF_SIZE]) {
 
 	api::instantiate(
 		&code_hash,
-		0u64,
-		0u64,
-		None,
+		u64::MAX,
+		u64::MAX,
+		&[u8::MAX; 32],
 		&[0; 32],
 		&[0; 32],
 		None,
diff --git a/substrate/frame/revive/fixtures/contracts/call_return_code.rs b/substrate/frame/revive/fixtures/contracts/call_return_code.rs
index 2d13b9f7095..19b3ae3fdb2 100644
--- a/substrate/frame/revive/fixtures/contracts/call_return_code.rs
+++ b/substrate/frame/revive/fixtures/contracts/call_return_code.rs
@@ -42,10 +42,10 @@ pub extern "C" fn call() {
 	let err_code = match api::call(
 		uapi::CallFlags::empty(),
 		callee_addr,
-		0u64,                // How much ref_time to devote for the execution. 0 = all.
-		0u64,                // How much proof_size to devote for the execution. 0 = all.
-		None,                // No deposit limit.
-		value, 				 // Value transferred to the contract.
+		u64::MAX,                 // How much ref_time to devote for the execution. u64::MAX = use all.
+		u64::MAX,                 // How much proof_size to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
+		value,                    // Value transferred to the contract.
 		input,
 		None,
 	) {
diff --git a/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs b/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs
index 8c8aee96284..78b275459f0 100644
--- a/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs
+++ b/substrate/frame/revive/fixtures/contracts/call_runtime_and_call.rs
@@ -42,10 +42,10 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::empty(),
 		callee_addr,
-		0u64,       // How much ref_time to devote for the execution. 0 = all.
-		0u64,       // How much proof_size to devote for the execution. 0 = all.
-		None,       // No deposit limit.
-		&[0u8; 32], // Value transferred to the contract.
+		u64::MAX,                 // How much ref_time to devote for the execution. u64::MAX = use all.
+		u64::MAX,                 // How much proof_size to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32],           // No deposit limit.
+		&[0u8; 32],               // Value transferred to the contract.
 		callee_input,
 		None,
 	)
diff --git a/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs b/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs
index 330393e706e..155a4b41bd9 100644
--- a/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs
+++ b/substrate/frame/revive/fixtures/contracts/call_with_flags_and_value.rs
@@ -40,10 +40,10 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::from_bits(flags).unwrap(),
 		callee_addr,
-		0u64,               // How much ref_time to devote for the execution. 0 = all.
-		0u64,               // How much proof_size to devote for the execution. 0 = all.
-		None,               // No deposit limit.
-		&u256_bytes(value), // Value transferred to the contract.
+		u64::MAX,                 // How much ref_time to devote for the execution. u64::MAX = use all.
+		u64::MAX,                 // How much proof_size to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32],           // No deposit limit.
+		&u256_bytes(value),       // Value transferred to the contract.
 		forwarded_input,
 		None,
 	)
diff --git a/substrate/frame/revive/fixtures/contracts/call_with_limit.rs b/substrate/frame/revive/fixtures/contracts/call_with_limit.rs
index 6ab892a6b7a..af5c301a353 100644
--- a/substrate/frame/revive/fixtures/contracts/call_with_limit.rs
+++ b/substrate/frame/revive/fixtures/contracts/call_with_limit.rs
@@ -43,8 +43,8 @@ pub extern "C" fn call() {
 		callee_addr,
 		ref_time,
 		proof_size,
-		None,       // No deposit limit.
-		&[0u8; 32], // value transferred to the contract.
+		&[u8::MAX; 32],   // No deposit limit.
+		&[0u8; 32],       // value transferred to the contract.
 		forwarded_input,
 		None,
 	)
diff --git a/substrate/frame/revive/fixtures/contracts/caller_contract.rs b/substrate/frame/revive/fixtures/contracts/caller_contract.rs
index edad43fae25..d042dc2c22a 100644
--- a/substrate/frame/revive/fixtures/contracts/caller_contract.rs
+++ b/substrate/frame/revive/fixtures/contracts/caller_contract.rs
@@ -42,9 +42,9 @@ pub extern "C" fn call() {
 	// Fail to deploy the contract since it returns a non-zero exit status.
 	let res = api::instantiate(
 		code_hash,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&value,
 		&reverted_input,
 		None,
@@ -56,9 +56,9 @@ pub extern "C" fn call() {
 	// Fail to deploy the contract due to insufficient ref_time weight.
 	let res = api::instantiate(
 		code_hash,
-		1u64, // too little ref_time weight
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		1u64,           // too little ref_time weight
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&value,
 		&input,
 		None,
@@ -70,9 +70,9 @@ pub extern "C" fn call() {
 	// Fail to deploy the contract due to insufficient proof_size weight.
 	let res = api::instantiate(
 		code_hash,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		1u64, // Too little proof_size weight
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		1u64,           // Too little proof_size weight
+		&[u8::MAX; 32], // No deposit limit.
 		&value,
 		&input,
 		None,
@@ -86,9 +86,9 @@ pub extern "C" fn call() {
 
 	api::instantiate(
 		code_hash,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&value,
 		&input,
 		Some(&mut callee),
@@ -101,9 +101,9 @@ pub extern "C" fn call() {
 	let res = api::call(
 		uapi::CallFlags::empty(),
 		&callee,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&value,
 		&reverted_input,
 		None,
@@ -114,9 +114,9 @@ pub extern "C" fn call() {
 	let res = api::call(
 		uapi::CallFlags::empty(),
 		&callee,
-		1u64, // Too little ref_time weight.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		1u64,           // Too little ref_time weight.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&value,
 		&input,
 		None,
@@ -127,9 +127,9 @@ pub extern "C" fn call() {
 	let res = api::call(
 		uapi::CallFlags::empty(),
 		&callee,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		1u64, // too little proof_size weight
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		1u64,           // too little proof_size weight
+		&[u8::MAX; 32], // No deposit limit.
 		&value,
 		&input,
 		None,
@@ -141,9 +141,9 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::empty(),
 		&callee,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&value,
 		&input,
 		Some(&mut &mut output[..]),
diff --git a/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs b/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs
index 22d6c5b548d..9b76b9d39ee 100644
--- a/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs
+++ b/substrate/frame/revive/fixtures/contracts/chain_extension_temp_storage.rs
@@ -54,10 +54,10 @@ pub extern "C" fn call() {
 		api::call(
 			uapi::CallFlags::ALLOW_REENTRY,
 			&addr,
-			0u64,       // How much ref_time to devote for the execution. 0 = all.
-			0u64,       // How much proof_size to devote for the execution. 0 = all.
-			None,       // No deposit limit.
-			&[0u8; 32], // Value transferred to the contract.
+			u64::MAX,       // How much ref_time to devote for the execution. u64::MAX = use all.
+			u64::MAX,       // How much proof_size to devote for the execution. u64::MAX = use all.
+			&[u8::MAX; 32], // No deposit limit.
+			&[0u8; 32],     // Value transferred to the contract.
 			input,
 			None,
 		)
diff --git a/substrate/frame/revive/fixtures/contracts/create1_with_value.rs b/substrate/frame/revive/fixtures/contracts/create1_with_value.rs
index c6adab82886..3554f8f620a 100644
--- a/substrate/frame/revive/fixtures/contracts/create1_with_value.rs
+++ b/substrate/frame/revive/fixtures/contracts/create1_with_value.rs
@@ -34,6 +34,16 @@ pub extern "C" fn call() {
 	api::value_transferred(&mut value);
 
 	// Deploy the contract with no salt (equivalent to create1).
-	let ret = api::instantiate(code_hash, 0u64, 0u64, None, &value, &[], None, None, None);
+	let ret = api::instantiate(
+		code_hash,
+		u64::MAX,
+		u64::MAX,
+		&[u8::MAX; 32],
+		&value,
+		&[],
+		None,
+		None,
+		None
+	);
 	assert!(ret.is_ok());
 }
diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs
index a12c36af856..5bb11e27903 100644
--- a/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs
+++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_call.rs
@@ -43,10 +43,10 @@ pub extern "C" fn call() {
 	let ret = api::call(
 		uapi::CallFlags::empty(),
 		callee,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		Some(deposit_limit),
-		&[0u8; 32], // Value transferred to the contract.
+		u64::MAX,      // How much ref_time weight to devote for the execution. u64::MAX = use all resources.
+		u64::MAX,      // How much proof_size weight to devote for the execution. u64::MAX = use all resources.
+		deposit_limit,
+		&[0u8; 32],    // Value transferred to the contract.
 		input,
 		None,
 	);
diff --git a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs
index ecc0fc79e6f..f627bc8ba6c 100644
--- a/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs
+++ b/substrate/frame/revive/fixtures/contracts/create_storage_and_instantiate.rs
@@ -41,9 +41,9 @@ pub extern "C" fn call() {
 
 	let ret = api::instantiate(
 		code_hash,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		Some(deposit_limit),
+		u64::MAX, // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX, // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		deposit_limit,
 		&value,
 		input,
 		Some(&mut address),
diff --git a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs
index cf12fed2756..660db84028d 100644
--- a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs
+++ b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs
@@ -49,10 +49,10 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::empty(),
 		callee,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None,
-		&[0u8; 32], // Value transferred to the contract.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = all.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = all.
+		&[u8::MAX; 32], // No deposit limit.
+		&[0u8; 32],     // Value transferred to the contract.
 		input,
 		None,
 	)
diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call.rs b/substrate/frame/revive/fixtures/contracts/delegate_call.rs
index 3cf74acf132..0dedd5f704c 100644
--- a/substrate/frame/revive/fixtures/contracts/delegate_call.rs
+++ b/substrate/frame/revive/fixtures/contracts/delegate_call.rs
@@ -46,7 +46,15 @@ pub extern "C" fn call() {
 	assert!(value[0] == 2u8);
 
 	let input = [0u8; 0];
-	api::delegate_call(uapi::CallFlags::empty(), address, ref_time, proof_size, None, &input, None).unwrap();
+	api::delegate_call(
+		uapi::CallFlags::empty(),
+		address,
+		ref_time,
+		proof_size,
+		&[u8::MAX; 32],
+		&input,
+		None
+	).unwrap();
 
 	api::get_storage(StorageFlags::empty(), &key, value).unwrap();
 	assert!(value[0] == 1u8);
diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs
index 0f157f5a18a..0c503aa93c5 100644
--- a/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs
+++ b/substrate/frame/revive/fixtures/contracts/delegate_call_deposit_limit.rs
@@ -34,7 +34,15 @@ pub extern "C" fn call() {
 	);
 
 	let input = [0u8; 0];
-	let ret = api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, Some(&u256_bytes(deposit_limit)), &input, None);
+	let ret = api::delegate_call(
+		uapi::CallFlags::empty(),
+		address,
+		u64::MAX,
+		u64::MAX,
+		&u256_bytes(deposit_limit),
+		&input,
+		None
+	);
 
 	if let Err(code) = ret {
 		api::return_value(uapi::ReturnFlags::REVERT, &(code as u32).to_le_bytes());
diff --git a/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs b/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs
index a8501dad469..b7bdb792c76 100644
--- a/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs
+++ b/substrate/frame/revive/fixtures/contracts/delegate_call_simple.rs
@@ -32,5 +32,13 @@ pub extern "C" fn call() {
 
 	// Delegate call into passed address.
 	let input = [0u8; 0];
-	api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, None, &input, None).unwrap();
+	api::delegate_call(
+		uapi::CallFlags::empty(),
+		address,
+		u64::MAX,
+		u64::MAX,
+		&[u8::MAX; 32],
+		&input,
+		None
+	).unwrap();
 }
diff --git a/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs b/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs
index 8342f4acf95..c2c7da528ba 100644
--- a/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs
+++ b/substrate/frame/revive/fixtures/contracts/destroy_and_transfer.rs
@@ -35,9 +35,9 @@ pub extern "C" fn deploy() {
 
 	api::instantiate(
 		code_hash,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&VALUE,
 		&input,
 		Some(&mut address),
@@ -62,9 +62,9 @@ pub extern "C" fn call() {
 	let res = api::call(
 		uapi::CallFlags::empty(),
 		&callee_addr,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&VALUE,
 		&[0u8; 1],
 		None,
@@ -75,9 +75,9 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::empty(),
 		&callee_addr,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, // How much proof_size weight to devote for the execution. 0 = all.
-		None, // No deposit limit.
+		u64::MAX,       // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
 		&VALUE,
 		&[0u8; 0],
 		None,
diff --git a/substrate/frame/revive/fixtures/contracts/drain.rs b/substrate/frame/revive/fixtures/contracts/drain.rs
index 6e3e708a6b3..53fb213143c 100644
--- a/substrate/frame/revive/fixtures/contracts/drain.rs
+++ b/substrate/frame/revive/fixtures/contracts/drain.rs
@@ -41,7 +41,7 @@ pub extern "C" fn call() {
 		&[0u8; 20],
 		0,
 		0,
-		None,
+		&[u8::MAX; 32],
 		&u256_bytes(balance),
 		&[],
 		None,
diff --git a/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs b/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs
index 9764859c619..f7cbd75be5a 100644
--- a/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs
+++ b/substrate/frame/revive/fixtures/contracts/instantiate_return_code.rs
@@ -33,10 +33,9 @@ pub extern "C" fn call() {
 
 	let err_code = match api::instantiate(
 		code_hash,
-		0u64, // How much ref_time weight to devote for the execution. 0 = all.
-		0u64, /* How much proof_size weight to devote for the execution. 0 =
-		       * all. */
-		None,                   // No deposit limit.
+		u64::MAX,               // How much ref_time weight to devote for the execution. u64::MAX = use all.
+		u64::MAX,               // How much proof_size weight to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32],         // No deposit limit.
 		&u256_bytes(10_000u64), // Value to transfer.
 		input,
 		None,
diff --git a/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs b/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs
index 3d7702c6537..6be5d5c72f9 100644
--- a/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs
+++ b/substrate/frame/revive/fixtures/contracts/locking_delegate_dependency.rs
@@ -52,7 +52,15 @@ fn load_input(delegate_call: bool) {
 	}
 
 	if delegate_call {
-		api::delegate_call(uapi::CallFlags::empty(), address, 0, 0, None, &[], None).unwrap();
+		api::delegate_call(
+			uapi::CallFlags::empty(),
+			address,
+			u64::MAX,
+			u64::MAX,
+			&[u8::MAX; 32],
+			&[],
+			None
+		).unwrap();
 	}
 }
 
diff --git a/substrate/frame/revive/fixtures/contracts/origin.rs b/substrate/frame/revive/fixtures/contracts/origin.rs
index 8e9afd8e805..151ca3da77c 100644
--- a/substrate/frame/revive/fixtures/contracts/origin.rs
+++ b/substrate/frame/revive/fixtures/contracts/origin.rs
@@ -49,9 +49,9 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::ALLOW_REENTRY,
 		&addr,
-		0u64,
-		0u64,
-		None,
+		u64::MAX,
+		u64::MAX,
+		&[u8::MAX; 32],
 		&[0; 32],
 		&[],
 		Some(&mut &mut buf[..]),
diff --git a/substrate/frame/revive/fixtures/contracts/read_only_call.rs b/substrate/frame/revive/fixtures/contracts/read_only_call.rs
index ea74d56867f..0a87ecbb9b1 100644
--- a/substrate/frame/revive/fixtures/contracts/read_only_call.rs
+++ b/substrate/frame/revive/fixtures/contracts/read_only_call.rs
@@ -39,10 +39,10 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::READ_ONLY,
 		callee_addr,
-		0u64,       // How much ref_time to devote for the execution. 0 = all.
-		0u64,       // How much proof_size to devote for the execution. 0 = all.
-		None,       // No deposit limit.
-		&[0u8; 32], // Value transferred to the contract.
+		u64::MAX,                 // How much ref_time to devote for the execution. u64::MAX = all.
+		u64::MAX,                 // How much proof_size to devote for the execution. u64::MAX = all.
+		&[u8::MAX; 32],           // No deposit limit.
+		&[0u8; 32],               // Value transferred to the contract.
 		callee_input,
 		None,
 	)
diff --git a/substrate/frame/revive/fixtures/contracts/recurse.rs b/substrate/frame/revive/fixtures/contracts/recurse.rs
index 2e70d67d8c7..ead565c0145 100644
--- a/substrate/frame/revive/fixtures/contracts/recurse.rs
+++ b/substrate/frame/revive/fixtures/contracts/recurse.rs
@@ -43,10 +43,10 @@ pub extern "C" fn call() {
 	api::call(
 		uapi::CallFlags::ALLOW_REENTRY,
 		&addr,
-		0u64,       // How much ref_time to devote for the execution. 0 = all.
-		0u64,       // How much deposit_limit to devote for the execution. 0 = all.
-		None,       // No deposit limit.
-		&[0u8; 32], // Value transferred to the contract.
+		u64::MAX,       // How much ref_time to devote for the execution. u64::MAX = use all resources.
+		u64::MAX,       // How much proof_size to devote for the execution. u64::MAX = use all resources.
+		&[u8::MAX; 32], // No deposit limit.
+		&[0u8; 32],     // Value transferred to the contract.
 		&(calls_left - 1).to_le_bytes(),
 		None,
 	)
diff --git a/substrate/frame/revive/fixtures/contracts/return_data_api.rs b/substrate/frame/revive/fixtures/contracts/return_data_api.rs
index 1d483373cff..1407e5323ea 100644
--- a/substrate/frame/revive/fixtures/contracts/return_data_api.rs
+++ b/substrate/frame/revive/fixtures/contracts/return_data_api.rs
@@ -80,8 +80,16 @@ fn assert_return_data_size_of(expected: u64) {
 
 /// Assert the return data to be reset after a balance transfer.
 fn assert_balance_transfer_does_reset() {
-	api::call(uapi::CallFlags::empty(), &[0u8; 20], 0, 0, None, &u256_bytes(128), &[], None)
-		.unwrap();
+	api::call(
+		uapi::CallFlags::empty(),
+		&[0u8; 20],
+		u64::MAX,
+		u64::MAX,
+		&[u8::MAX; 32],
+		&u256_bytes(128),
+		&[],
+		None
+	).unwrap();
 	assert_return_data_size_of(0);
 }
 
@@ -111,9 +119,9 @@ pub extern "C" fn call() {
 	let mut instantiate = |exit_flag| {
 		api::instantiate(
 			code_hash,
-			0u64,
-			0u64,
-			None,
+			u64::MAX,
+			u64::MAX,
+			&[u8::MAX; 32],
 			&[0; 32],
 			&construct_input(exit_flag),
 			Some(&mut address_buf),
@@ -125,9 +133,9 @@ pub extern "C" fn call() {
 		api::call(
 			uapi::CallFlags::empty(),
 			address_buf,
-			0u64,
-			0u64,
-			None,
+			u64::MAX,
+			u64::MAX,
+			&[u8::MAX; 32],
 			&[0; 32],
 			&construct_input(exit_flag),
 			None,
diff --git a/substrate/frame/revive/fixtures/contracts/self_destruct.rs b/substrate/frame/revive/fixtures/contracts/self_destruct.rs
index 2f37706634b..053e545deb1 100644
--- a/substrate/frame/revive/fixtures/contracts/self_destruct.rs
+++ b/substrate/frame/revive/fixtures/contracts/self_destruct.rs
@@ -42,10 +42,10 @@ pub extern "C" fn call() {
 		api::call(
 			uapi::CallFlags::ALLOW_REENTRY,
 			&addr,
-			0u64,       // How much ref_time to devote for the execution. 0 = all.
-			0u64,       // How much proof_size to devote for the execution. 0 = all.
-			None,       // No deposit limit.
-			&[0u8; 32], // Value to transfer.
+			u64::MAX,                 // How much ref_time to devote for the execution. u64 = all.
+			u64::MAX,                 // How much proof_size to devote for the execution. u64 = all.
+			&[u8::MAX; 32],           // No deposit limit.
+			&[0u8; 32],               // Value to transfer.
 			&[0u8; 0],
 			None,
 		)
diff --git a/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs b/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs
index 09d45d0a841..053f97feda4 100644
--- a/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs
+++ b/substrate/frame/revive/fixtures/contracts/transfer_return_code.rs
@@ -33,7 +33,7 @@ pub extern "C" fn call() {
 		&[0u8; 20],
 		0,
 		0,
-		None,
+		&[u8::MAX; 32],
 		&u256_bytes(100u64),
 		&[],
 		None,
diff --git a/substrate/frame/revive/fixtures/src/lib.rs b/substrate/frame/revive/fixtures/src/lib.rs
index 38171edf115..7685253d1ea 100644
--- a/substrate/frame/revive/fixtures/src/lib.rs
+++ b/substrate/frame/revive/fixtures/src/lib.rs
@@ -22,7 +22,7 @@ extern crate alloc;
 // generated file that tells us where to find the fixtures
 include!(concat!(env!("OUT_DIR"), "/fixture_location.rs"));
 
-/// Load a given wasm module and returns a wasm binary contents along with it's hash.
+/// Load a given wasm module and returns a wasm binary contents along with its hash.
 #[cfg(feature = "std")]
 pub fn compile_module(fixture_name: &str) -> anyhow::Result<(Vec<u8>, sp_core::H256)> {
 	let out_dir: std::path::PathBuf = FIXTURE_DIR.into();
diff --git a/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm
index 585fbb392a314c15a35e7e529106738bde3be02a..38a1098fe3a767aa0af74764bf7247e59f6110b7 100644
GIT binary patch
delta 1279
zcmY+CT}&KR6vy}8S>|h5m|e@38e(^sVY0h1Ep^(6SQDhH>}<fyEEMr04aVvWl*o=r
zv$IVM)UN4liG<Fr3XKmWk`(ZvO-f(<3VuBBAf`4^YZ{YP`_KmsL9x<Bif6FYHkoth
zo_o&y&u`8-bM-gsX|=n<<GFE~UI|TjUw&y|V04l`{7QCUFi($Ndl-+-5CKVhzNHh)
z*US#~3-&VG$Gy&ta4Vd~zsSGq`_89T9;y7HGURXe|L)&i^?cQds>!P9i#ocF8;1We
z1&cCkvXmJzsF9js>R6z3I3zPIG8<Nf2R-W=6DL{k-gt>J`_;T2Ml#D|l+*Vkt?i@5
ztCMe2THB{bUsz&LrtQSs>Z>1<Mejb}amv!wT&{Dw*CPIkK~zWHsCX1zCzmRcb*J!F
z>^N4Waj-Ek8*CVpF;+&yM4{tY!X(1&!Z#7oxDXw^P}Z0q$vizVG>x-;7qDqe!i}q7
zm%$dm=CRCrExkz<#HCvrQwyZwO;;*`%b}E1!eK?Kr8X2va2M8HJk&-?3RATspl2E)
zrAUqLag5p+(LI!@H&G+{JT~?Hlw}B}(LzO2Qj+qTMhXHY2((jB6^ch0<`(8zbZ~pP
zNPcCW@V!P`hT2uT^Ql<gEeYvmgZ|R0%&J1-+G>WuG#O&wM~{&@uATnvZ;h$hAfMB{
z<Qwk5&;f7<ZUeiU{m4G>4&4TN1n}5}cNf-OJlKWDEWjgxhaxn($1&`}16vaYcm(kH
zKX?r6*ky<F5}dbVV8<@|Zb}JRN+_#@429(RJr9wa8uQ6|&ghXDnd3RMH4;t7An*ZM
z;M0%yted(znhR%`XV&xQa)xT^DX5StGzTE3c^%rE)q7zQdMqO~dy>f;y%cCMIlY&3
z_}VIF{?eE$7bkoPw9Q%D(r+1D(U|cytG|bQM#by;*YY{&BPgpvYa@i7TXXgLQgiRI
zWRdP7S^timbAM{gPi2>t3(L$suuq?b$d_c=-*M<@8E`gXkCg9&P3?l5XWb~k!f5^v
zi|@c<WEm~vC*?A<chm}J*myZ_S?<58s*qm#17?kMR$W20M5=xgf4@xns^jQGLaJlZ
zo68x->&ZhAk<5ua2gUSs$L5x0Misix!wacG&pGmQ^}-MvDcC|Z8jM7LF=By0F&T{p
zf=b5ecg~3MMEon;o)X`g61CImOk7cd#pGT`P@Gv&fuvwmam1~sAOstNMcWpWd>~Q~
zqOG=_D%f_tdq)f;lCz2}Dw%YrGbJjb<_Ja5wArD}54w_fgc|Pd?%#Adj+1H?l|)<#
zHbl0v-JFcGEjn|Lc(mvgQeyE`QA$Jp*)x*jr0?culug+wF~T3@BL(q+_*(^`byyU~
MKxn~9Ll-^&0#ACHtN;K2

delta 1201
zcmY+CT}&KR6vy}8ncX|HA26&fC8pA0xJ-5l*6L^^*2Z-+Ei-FLW=5dI57KJu43x+Y
zMt8T3p(sh0U<mAOt2UYtvk9ONl9VP&Vx-1sec(Y&{Ys34^a0~TEn<t?QoI9!w8@;z
zx##@v|NiElS*)F|4H#|BKHv0cV7c-*`|^?Q?&03R(PQcEo?KvLya5iMBm$5ho(bHC
zE+WW%!hO!|<yHPg{u{nFxI5^C#zVE?ec{RQVmKW6G6KpP%38~M%T8Rd!Bto{d)pNn
z8F%|0#;uGoL)#O~iHiB7l?qBIoM;I5`&La9C2982_;-xkW#nuTC|nSNjQu1qn`Y*I
z8GC~<o4$Dc`D+MRO>a-Hyiz1{>|>!WCTkm+%(FY#BP8PQ0e<qP|8X!!F8bA-uffe;
z7u4k}jWi91#$iaR{R%it^gtYzL(<6auKokKoub<vbC2(FyQ*^Il*_dn4MCbJndmDr
z8+gAj57V6^&~;8xi_d6G(U_ny1{IFYdSZO-KNA6pF0L=-dAeNkY4R)-HF+LZ4cVul
zYhR^%{)TIsyaLyBc@eG+*<-YP8)hBBb$pB_vkdDx95ZCk(t4Q5+4C?6LKa$pK@PO;
z5PL}p?F$_WWF4cWYS;eQOWtLW3>S^uTv0efs9##K&|iLXj{5)@MBxuYW`*?g<A<KR
zX`=2s6wXsUa*(b%?ocwI$v((Y4B*-<r4u~5Dvyry=w@ko=LVe!H5o%n7lT9gJgqmt
zob4m$g30PjH%#>7YQ||-kirLfusPhu3EG$;H-f3XJ*%#53}-|O9b3(fXB@+|HP=ZP
z!V^zXjk!t(mbOJY)Ap>ReS8|_9Fd`3C}WG{WT?si^S>tg^TM~GdLT^R+VoF+xNM?N
z35Yxx?Z1`FtPf5zgk%RTr%57`*|qSGiIz(q880g6F0=!eY3DccQ>3}$of18K9qp~s
zJ!sksRP(CWP1_0jOKqVg;09U(f~=Ex?{um}hclcNEiO^YWwYLw8HSL0fNp<?^q2hr
z4v?1ery;vU-Yt)VU&*ENm^^jeLaZ-GuM8+W4)XNsd~LCX>k2Z2w%_Rs7()9MQZ8KX
z%X2j<PO4vjZ%9&fP*o*dDB>$Pu3IfuzM?|c>vjDj>1@#|6mWFV!8HZksuw(wTEH~f
zc=#He!ci*EZ<CTLS0hzO(lFL>bX1)XCS|%wk2mYoVpP?WxS;NrB<hw|wZ7z*$(gO}
z-o(;VDTyD&lSQnb8PGP5^bXdoeDN;xL7a+8qqtr^AEWwC>pZ6PTl%`X^}H|;Yl#mC
Wh2pkok^_;#NP;SAo~e&x*7rX_jeaBm

diff --git a/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm
index 3f96fdfc21d8d8fc5bf68a34f0c9f7b055afa2a4..d0082db90e5e398832e4a32a9ec86dce83d16dd5 100644
GIT binary patch
delta 1052
zcmY*WZA@EL7{2Gw_S}z_-8R4wr?xkz7m8*QJKg?h8rF;+;@s^O6m*LyGwlzeYnDOL
zOo#y~Rt-HbnCv4NHkbfe6h#-~4>NTxL}MhzD1Ic?s0qd;$P$NhVR$Yg!{j{Y&3VrA
zydURWtS!}gtcUhXQmNosZhy+v`FK~?$T831Cy#VJ-Rt3B8+hcoLsGZ&taL^y!@F=Z
zK8XAAW%ru9lN8BOIiqmpBjpzLpn9*?p{;5Ud&9m*eUrY{^5ycEw^isn^#l4b{rIef
z9>d|_YDQncnaX*bshY$2$`XFYUpibBC9$X+wDiE>YLcwLS7p1=EO=ct>SCGQl)H4|
zV{pSV+2w6_XH#uGJr8boS#Ee+sfBs619iYVE*f~rlWdyt{XD<mvYYlFui5#n%W-ID
zTpJGhDn#60pikhcyMP)X?|Hv(5JyQ=w)85`-$|0i39}zD^F6f2%o%5!bhdG{mN3sF
zIiHT9O!|Tl%sccPTFa-#Q6{y3@~I9He?Zxkl}VLQ!u&-X7X{rf;w99ZI*-&$s!f%V
zO@4Q}vB|F<X70fnGaE27>jZ-Ep;GM(;|k)sx8M)5*ViU!OCTW-gP43bib1y=MnyO+
z--kNkd-;Q#iYx2GwQhF>{+(n8U$Mz=L;|l?4WBBvqkg!e%%V}4R)gp$e5T$R+IuBQ
z-o4(FwxR?@6{;%E%%aAOz@)%9+^kVF4l%9Lv-oF{JaQdglkb2b&4;F7Oe>)|==K`u
zbvWTYjbymT*N)zR5#Q&#OP7<xz9wEFo8B`$M6&6`k+g(+1)_>dP)|z2*>vpW&}~=i
z>aYFx;df2LX!-r<5d2hr7R|sb+fJbhsMk-SEL_xIz{^YUfWO-F%~F!Ezu--^2FK1p
z&VQ@^<5HAZdgFH@AWM&b3FrJ5`Wj__C?2PV@kGsVE;m8PCTOxi!&4?>#$Zlnte&x=
zF-9XSM_D+`iZsqgZB7|?uw^ydEjhn`i=Oiq*ec)^278{y8-yzo=lkL`%=n9a6K06h
zu~9nAb!In5{+Cj?*DFyL;?t&5pi|8o30ru8@i?cUO$#4L+~{asHo<Vh9;QOXX>p(7
z^xrD%frv=7StiLE2PmSEa6!mK!p#f8RkDD=ZZIqm<F+H)pbW#{xm-f@eX^0VkpY>v
Iuuw?)2l-oAiU0rr

delta 1068
zcmY*XZ)h839KPS%OYTpa^e)h5|8Qx1b8REIvCCKmDKXsWy<w5$=Cm7gcC32&P_+SF
z(oRi2I2R4o_TCEW1{;!MyN*$)!X`ypt#cnNPVkc;21Q{gy7j}lIct2=b=?N{@ZLT5
z{GR7|@7`+Lr8al!=_JQp8g*aa{|tZRaDV^EA$Q;N1N|=#x}Dh&8nK_{j&jGi(_90J
zq69jKifGAIbL|nz!a?zXbWEC(-12T&^AOMPo>-&LYj{V!UA`ZEN1Ilf0!pWHNO@r<
z1<#>yY$dB)K-uPLlx>+n<>omwq*nV{bRnjT!Ia`ZzLFNI^oNFBaEdnao22g>tnSjE
zAAa>3-Nx_S9CzjIo?LEkfPc(Iv>rsX6obhSqXj++G5RTw{ja*y-P7J*ugvpSck=a3
zkABJ1?R?Zz;@5_8fp)mQgY)#3YZNxpvitMGOGp=VF{QM)|49pnk*O8I)XsodwquaB
zD?qeqsF~UYs1a=fYGu2Z(X)`ZQ(1eCNf+4TWhmQ4wtfqP_8E|~c8@HACHzs3wS?c?
zMBBiMDhwh6hsuHKP_EjSC4mjJ%(uHZ)PDKjdQ<wi=xgd^n(Ykj3_%3+im;QuD`L1v
zXT&ZTqD$fz8!s-e?r2|t_ur|%WpddPt_mC-;M?dmX$$mHuRH@&bY2d^JM<TMTkC-v
zY2mZ~a+#DafG%NKVtf05WsESC7>aa<2M3Bsmuk8vs0*?O?*1HHqc3{8V2+;iRN-ej
z+!%scI^B2*w$h|`KYUF;@h<LKUP=qrZT4aEna;B(gnY&v$W&pFL6>9!a&{H+nb=1s
zw%%%w+`j9)W1dd=_P{IjrtchlL&uv=!Xxwv<s`gEe^TDsEM7|sM{edavDMCftTQ_-
zx{y-3{-STFL0F`x)rXWDf7hw_b+$lCF|N`@b)lffOQvBgME$BtNQ97&LnL7(9mkMW
z)p4F;H@hRmaunhu5{^MixMLg>2ZtH%yT&RL%EO`i^f)}JJ)r3@9G;YV*_2hn!zDA}
z;AWDH7chx*5P!JUsf<|<STg&{orpNzm-L5-_5NDoBn~_HZA{|pBUl=}*Jx~2M){G7
zpD+<7#;{TEf40tB%;;Xso8prMnN}jiWXy2=1>+(PHXL)Eac~ij*JW!W!!WR_j^b7)
Pdfaq^l8Pstc%1tmWiL>e

diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs
index e67c39ec089..1796348ff32 100644
--- a/substrate/frame/revive/src/benchmarking/mod.rs
+++ b/substrate/frame/revive/src/benchmarking/mod.rs
@@ -1648,8 +1648,8 @@ mod benchmarks {
 				memory.as_mut_slice(),
 				CallFlags::CLONE_INPUT.bits(), // flags
 				0,                             // callee_ptr
-				0,                             // ref_time_limit
-				0,                             // proof_size_limit
+				u64::MAX,                      // ref_time_limit
+				u64::MAX,                      // proof_size_limit
 				callee_len,                    // deposit_ptr
 				callee_len + deposit_len,      // value_ptr
 				0,                             // input_data_ptr
@@ -1688,8 +1688,8 @@ mod benchmarks {
 				memory.as_mut_slice(),
 				0,           // flags
 				0,           // address_ptr
-				0,           // ref_time_limit
-				0,           // proof_size_limit
+				u64::MAX,    // ref_time_limit
+				u64::MAX,    // proof_size_limit
 				address_len, // deposit_ptr
 				0,           // input_data_ptr
 				0,           // input_data_len
@@ -1715,7 +1715,7 @@ mod benchmarks {
 		let value_bytes = Into::<U256>::into(value).encode();
 		let value_len = value_bytes.len() as u32;
 
-		let deposit: BalanceOf<T> = 0u32.into();
+		let deposit: BalanceOf<T> = BalanceOf::<T>::max_value();
 		let deposit_bytes = Into::<U256>::into(deposit).encode();
 		let deposit_len = deposit_bytes.len() as u32;
 
@@ -1750,8 +1750,8 @@ mod benchmarks {
 			result = runtime.bench_instantiate(
 				memory.as_mut_slice(),
 				0,                   // code_hash_ptr
-				0,                   // ref_time_limit
-				0,                   // proof_size_limit
+				u64::MAX,            // ref_time_limit
+				u64::MAX,            // proof_size_limit
 				offset(hash_len),    // deposit_ptr
 				offset(deposit_len), // value_ptr
 				offset(value_len),   // input_data_ptr
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index 478e96dc994..c069216d6cc 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -53,7 +53,7 @@ use sp_core::{
 };
 use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256};
 use sp_runtime::{
-	traits::{BadOrigin, Convert, Dispatchable, Saturating, Zero},
+	traits::{BadOrigin, Bounded, Convert, Dispatchable, Saturating, Zero},
 	DispatchError, SaturatedConversion,
 };
 
@@ -885,9 +885,9 @@ where
 			args,
 			value,
 			gas_meter,
-			Weight::zero(),
+			Weight::max_value(),
 			storage_meter,
-			BalanceOf::<T>::zero(),
+			BalanceOf::<T>::max_value(),
 			false,
 			true,
 		)?
@@ -1117,25 +1117,15 @@ where
 				return Ok(output);
 			}
 
-			// Storage limit is normally enforced as late as possible (when the last frame returns)
-			// so that the ordering of storage accesses does not matter.
-			// (However, if a special limit was set for a sub-call, it should be enforced right
-			// after the sub-call returned. See below for this case of enforcement).
-			if self.frames.is_empty() {
-				let frame = &mut self.first_frame;
-				frame.contract_info.load(&frame.account_id);
-				let contract = frame.contract_info.as_contract();
-				frame.nested_storage.enforce_limit(contract)?;
-			}
-
 			let frame = self.top_frame_mut();
 
-			// If a special limit was set for the sub-call, we enforce it here.
-			// The sub-call will be rolled back in case the limit is exhausted.
+			// The storage deposit is only charged at the end of every call stack.
+			// To make sure that no sub call uses more than it is allowed to,
+			// the limit is manually enforced here.
 			let contract = frame.contract_info.as_contract();
 			frame
 				.nested_storage
-				.enforce_subcall_limit(contract)
+				.enforce_limit(contract)
 				.map_err(|e| ExecError { error: e, origin: ErrorOrigin::Callee })?;
 
 			let account_id = T::AddressMapper::to_address(&frame.account_id);
@@ -1463,7 +1453,7 @@ where
 				FrameArgs::Call { dest: dest.clone(), cached_info, delegated_call: None },
 				value,
 				gas_limit,
-				deposit_limit.try_into().map_err(|_| Error::<T>::BalanceConversionFailed)?,
+				deposit_limit.saturated_into::<BalanceOf<T>>(),
 				// Enable read-only access if requested; cannot disable it if already set.
 				read_only || self.is_read_only(),
 			)? {
@@ -1519,7 +1509,7 @@ where
 			},
 			value,
 			gas_limit,
-			deposit_limit.try_into().map_err(|_| Error::<T>::BalanceConversionFailed)?,
+			deposit_limit.saturated_into::<BalanceOf<T>>(),
 			self.is_read_only(),
 		)?;
 		self.run(executable.expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE), input_data)
@@ -1549,7 +1539,7 @@ where
 			},
 			value.try_into().map_err(|_| Error::<T>::BalanceConversionFailed)?,
 			gas_limit,
-			deposit_limit.try_into().map_err(|_| Error::<T>::BalanceConversionFailed)?,
+			deposit_limit.saturated_into::<BalanceOf<T>>(),
 			self.is_read_only(),
 		)?;
 		let address = T::AddressMapper::to_address(&self.top_frame().account_id);
@@ -3098,8 +3088,8 @@ mod tests {
 				let (address, output) = ctx
 					.ext
 					.instantiate(
-						Weight::zero(),
-						U256::zero(),
+						Weight::MAX,
+						U256::MAX,
 						dummy_ch,
 						<Test as Config>::Currency::minimum_balance().into(),
 						vec![],
@@ -3802,8 +3792,8 @@ mod tests {
 		let succ_fail_code = MockLoader::insert(Constructor, move |ctx, _| {
 			ctx.ext
 				.instantiate(
-					Weight::zero(),
-					U256::zero(),
+					Weight::MAX,
+					U256::MAX,
 					fail_code,
 					ctx.ext.minimum_balance() * 100,
 					vec![],
@@ -3819,8 +3809,8 @@ mod tests {
 			let addr = ctx
 				.ext
 				.instantiate(
-					Weight::zero(),
-					U256::zero(),
+					Weight::MAX,
+					U256::MAX,
 					success_code,
 					ctx.ext.minimum_balance() * 100,
 					vec![],
@@ -4597,7 +4587,7 @@ mod tests {
 				// Successful instantiation should set the output
 				let address = ctx
 					.ext
-					.instantiate(Weight::zero(), U256::zero(), ok_ch, value, vec![], None)
+					.instantiate(Weight::MAX, U256::MAX, ok_ch, value, vec![], None)
 					.unwrap();
 				assert_eq!(
 					ctx.ext.last_frame_output(),
@@ -4606,15 +4596,7 @@ mod tests {
 
 				// Balance transfers should reset the output
 				ctx.ext
-					.call(
-						Weight::zero(),
-						U256::zero(),
-						&address,
-						U256::from(1),
-						vec![],
-						true,
-						false,
-					)
+					.call(Weight::MAX, U256::MAX, &address, U256::from(1), vec![], true, false)
 					.unwrap();
 				assert_eq!(ctx.ext.last_frame_output(), &Default::default());
 
@@ -4827,7 +4809,7 @@ mod tests {
 
 				// Constructors can not access the immutable data
 				ctx.ext
-					.instantiate(Weight::zero(), U256::zero(), dummy_ch, value, vec![], None)
+					.instantiate(Weight::MAX, U256::MAX, dummy_ch, value, vec![], None)
 					.unwrap();
 
 				exec_success()
@@ -4944,7 +4926,7 @@ mod tests {
 			move |ctx, _| {
 				let value = <Test as Config>::Currency::minimum_balance().into();
 				ctx.ext
-					.instantiate(Weight::zero(), U256::zero(), dummy_ch, value, vec![], None)
+					.instantiate(Weight::MAX, U256::MAX, dummy_ch, value, vec![], None)
 					.unwrap();
 
 				exec_success()
@@ -4989,7 +4971,7 @@ mod tests {
 			move |ctx, _| {
 				let value = <Test as Config>::Currency::minimum_balance().into();
 				ctx.ext
-					.instantiate(Weight::zero(), U256::zero(), dummy_ch, value, vec![], None)
+					.instantiate(Weight::MAX, U256::MAX, dummy_ch, value, vec![], None)
 					.unwrap();
 
 				exec_success()
diff --git a/substrate/frame/revive/src/gas.rs b/substrate/frame/revive/src/gas.rs
index 5c30a0a5100..e8338db1219 100644
--- a/substrate/frame/revive/src/gas.rs
+++ b/substrate/frame/revive/src/gas.rs
@@ -22,7 +22,7 @@ use frame_support::{
 	weights::Weight,
 	DefaultNoBound,
 };
-use sp_runtime::{traits::Zero, DispatchError};
+use sp_runtime::DispatchError;
 
 #[cfg(test)]
 use std::{any::Any, fmt::Debug};
@@ -168,25 +168,19 @@ impl<T: Config> GasMeter<T> {
 		}
 	}
 
-	/// Create a new gas meter by removing gas from the current meter.
+	/// Create a new gas meter by removing *all* the gas from the current meter.
 	///
-	/// # Note
-	///
-	/// Passing `0` as amount is interpreted as "all remaining gas".
+	/// This should only be used by the primordial frame in a sequence of calls - every subsequent
+	/// frame should use [`nested`](Self::nested).
+	pub fn nested_take_all(&mut self) -> Self {
+		let gas_left = self.gas_left;
+		self.gas_left -= gas_left;
+		GasMeter::new(gas_left)
+	}
+
+	/// Create a new gas meter for a nested call by removing gas from the current meter.
 	pub fn nested(&mut self, amount: Weight) -> Self {
-		let amount = Weight::from_parts(
-			if amount.ref_time().is_zero() {
-				self.gas_left().ref_time()
-			} else {
-				amount.ref_time()
-			},
-			if amount.proof_size().is_zero() {
-				self.gas_left().proof_size()
-			} else {
-				amount.proof_size()
-			},
-		)
-		.min(self.gas_left);
+		let amount = amount.min(self.gas_left);
 		self.gas_left -= amount;
 		GasMeter::new(amount)
 	}
@@ -392,6 +386,50 @@ mod tests {
 		assert!(gas_meter.charge(SimpleToken(1)).is_err());
 	}
 
+	/// Previously, passing a `Weight` of 0 to `nested` would consume all of the meter's current
+	/// gas.
+	///
+	/// Now, a `Weight` of 0 means no gas for the nested call.
+	#[test]
+	fn nested_zero_gas_requested() {
+		let test_weight = 50000.into();
+		let mut gas_meter = GasMeter::<Test>::new(test_weight);
+		let gas_for_nested_call = gas_meter.nested(0.into());
+
+		assert_eq!(gas_meter.gas_left(), 50000.into());
+		assert_eq!(gas_for_nested_call.gas_left(), 0.into())
+	}
+
+	#[test]
+	fn nested_some_gas_requested() {
+		let test_weight = 50000.into();
+		let mut gas_meter = GasMeter::<Test>::new(test_weight);
+		let gas_for_nested_call = gas_meter.nested(10000.into());
+
+		assert_eq!(gas_meter.gas_left(), 40000.into());
+		assert_eq!(gas_for_nested_call.gas_left(), 10000.into())
+	}
+
+	#[test]
+	fn nested_all_gas_requested() {
+		let test_weight = Weight::from_parts(50000, 50000);
+		let mut gas_meter = GasMeter::<Test>::new(test_weight);
+		let gas_for_nested_call = gas_meter.nested(test_weight);
+
+		assert_eq!(gas_meter.gas_left(), Weight::from_parts(0, 0));
+		assert_eq!(gas_for_nested_call.gas_left(), 50_000.into())
+	}
+
+	#[test]
+	fn nested_excess_gas_requested() {
+		let test_weight = Weight::from_parts(50000, 50000);
+		let mut gas_meter = GasMeter::<Test>::new(test_weight);
+		let gas_for_nested_call = gas_meter.nested(test_weight + 10000.into());
+
+		assert_eq!(gas_meter.gas_left(), Weight::from_parts(0, 0));
+		assert_eq!(gas_for_nested_call.gas_left(), 50_000.into())
+	}
+
 	// Make sure that the gas meter does not charge in case of overcharge
 	#[test]
 	fn overcharge_does_not_charge() {
diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs
index a7127f812b4..452d2c8a306 100644
--- a/substrate/frame/revive/src/primitives.rs
+++ b/substrate/frame/revive/src/primitives.rs
@@ -72,7 +72,7 @@ pub struct ContractResult<R, Balance, EventRecord> {
 	///
 	/// # Note
 	///
-	/// This can only different from [`Self::gas_consumed`] when weight pre charging
+	/// This can only be different from [`Self::gas_consumed`] when weight pre charging
 	/// is used. Currently, only `seal_call_runtime` makes use of pre charging.
 	/// Additionally, any `seal_call` or `seal_instantiate` makes use of pre-charging
 	/// when a non-zero `gas_limit` argument is supplied.
diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs
index 6eddf048be9..4febcb0c406 100644
--- a/substrate/frame/revive/src/storage/meter.rs
+++ b/substrate/frame/revive/src/storage/meter.rs
@@ -101,12 +101,8 @@ pub struct Root;
 
 /// State parameter that constitutes a meter that is in its nested state.
 /// Its value indicates whether the nested meter has its own limit.
-#[derive(DefaultNoBound, RuntimeDebugNoBound)]
-pub enum Nested {
-	#[default]
-	DerivedLimit,
-	OwnLimit,
-}
+#[derive(Default, Debug)]
+pub struct Nested;
 
 impl State for Root {}
 impl State for Nested {}
@@ -125,10 +121,8 @@ pub struct RawMeter<T: Config, E, S: State + Default + Debug> {
 	/// We only have one charge per contract hence the size of this vector is
 	/// limited by the maximum call depth.
 	charges: Vec<Charge<T>>,
-	/// We store the nested state to determine if it has a special limit for sub-call.
-	nested: S,
 	/// Type parameter only used in impls.
-	_phantom: PhantomData<E>,
+	_phantom: PhantomData<(E, S)>,
 }
 
 /// This type is used to describe a storage change when charging from the meter.
@@ -281,21 +275,14 @@ where
 	S: State + Default + Debug,
 {
 	/// Create a new child that has its `limit`.
-	/// Passing `0` as the limit is interpreted as to take whatever is remaining from its parent.
 	///
 	/// This is called whenever a new subcall is initiated in order to track the storage
 	/// usage for this sub call separately. This is necessary because we want to exchange balance
 	/// with the current contract we are interacting with.
 	pub fn nested(&self, limit: BalanceOf<T>) -> RawMeter<T, E, Nested> {
 		debug_assert!(matches!(self.contract_state(), ContractState::Alive));
-		// If a special limit is specified higher than it is available,
-		// we want to enforce the lesser limit to the nested meter, to fail in the sub-call.
-		let limit = self.available().min(limit);
-		if limit.is_zero() {
-			RawMeter { limit: self.available(), ..Default::default() }
-		} else {
-			RawMeter { limit, nested: Nested::OwnLimit, ..Default::default() }
-		}
+
+		RawMeter { limit: self.available().min(limit), ..Default::default() }
 	}
 
 	/// Absorb a child that was spawned to handle a sub call.
@@ -477,13 +464,6 @@ impl<T: Config, E: Ext<T>> RawMeter<T, E, Nested> {
 
 	/// [`Self::charge`] does not enforce the storage limit since we want to do this check as late
 	/// as possible to allow later refunds to offset earlier charges.
-	///
-	/// # Note
-	///
-	/// We normally need to call this **once** for every call stack and not for every cross contract
-	/// call. However, if a dedicated limit is specified for a sub-call, this needs to be called
-	/// once the sub-call has returned. For this, the [`Self::enforce_subcall_limit`] wrapper is
-	/// used.
 	pub fn enforce_limit(
 		&mut self,
 		info: Option<&mut ContractInfo<T>>,
@@ -502,18 +482,6 @@ impl<T: Config, E: Ext<T>> RawMeter<T, E, Nested> {
 		}
 		Ok(())
 	}
-
-	/// This is a wrapper around [`Self::enforce_limit`] to use on the exit from a sub-call to
-	/// enforce its special limit if needed.
-	pub fn enforce_subcall_limit(
-		&mut self,
-		info: Option<&mut ContractInfo<T>>,
-	) -> Result<(), DispatchError> {
-		match self.nested {
-			Nested::OwnLimit => self.enforce_limit(info),
-			Nested::DerivedLimit => Ok(()),
-		}
-	}
 }
 
 impl<T: Config> Ext<T> for ReservingExt {
@@ -724,6 +692,49 @@ mod tests {
 		)
 	}
 
+	/// Previously, passing a limit of 0 meant unlimited storage for a nested call.
+	///
+	/// Now, a limit of 0 means the subcall will not be able to use any storage.
+	#[test]
+	fn nested_zero_limit_requested() {
+		clear_ext();
+
+		let meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap();
+		assert_eq!(meter.available(), 1_000);
+		let nested0 = meter.nested(BalanceOf::<Test>::zero());
+		assert_eq!(nested0.available(), 0);
+	}
+
+	#[test]
+	fn nested_some_limit_requested() {
+		clear_ext();
+
+		let meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap();
+		assert_eq!(meter.available(), 1_000);
+		let nested0 = meter.nested(500);
+		assert_eq!(nested0.available(), 500);
+	}
+
+	#[test]
+	fn nested_all_limit_requested() {
+		clear_ext();
+
+		let meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap();
+		assert_eq!(meter.available(), 1_000);
+		let nested0 = meter.nested(1_000);
+		assert_eq!(nested0.available(), 1_000);
+	}
+
+	#[test]
+	fn nested_over_limit_requested() {
+		clear_ext();
+
+		let meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap();
+		assert_eq!(meter.available(), 1_000);
+		let nested0 = meter.nested(2_000);
+		assert_eq!(nested0.available(), 1_000);
+	}
+
 	#[test]
 	fn empty_charge_works() {
 		clear_ext();
@@ -879,7 +890,7 @@ mod tests {
 			let mut meter = TestMeter::new(&test_case.origin, 1_000, 0).unwrap();
 			assert_eq!(meter.available(), 1_000);
 
-			let mut nested0 = meter.nested(BalanceOf::<Test>::zero());
+			let mut nested0 = meter.nested(BalanceOf::<Test>::max_value());
 			nested0.charge(&Diff {
 				bytes_added: 5,
 				bytes_removed: 1,
@@ -895,7 +906,7 @@ mod tests {
 				items_deposit: 20,
 				immutable_data_len: 0,
 			});
-			let mut nested1 = nested0.nested(BalanceOf::<Test>::zero());
+			let mut nested1 = nested0.nested(BalanceOf::<Test>::max_value());
 			nested1.charge(&Diff { items_removed: 5, ..Default::default() });
 			nested1.charge(&Diff { bytes_added: 20, ..Default::default() });
 			nested1.terminate(&nested1_info, CHARLIE);
diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs
index 664578bf767..cf02d17a4d0 100644
--- a/substrate/frame/revive/src/tests.rs
+++ b/substrate/frame/revive/src/tests.rs
@@ -1149,7 +1149,7 @@ fn delegate_call() {
 
 		assert_ok!(builder::call(caller_addr)
 			.value(1337)
-			.data((callee_addr, 0u64, 0u64).encode())
+			.data((callee_addr, u64::MAX, u64::MAX).encode())
 			.build());
 	});
 }
@@ -2261,12 +2261,12 @@ fn gas_estimation_for_subcalls() {
 
 		// Run the test for all of those weight limits for the subcall
 		let weights = [
-			Weight::zero(),
+			Weight::MAX,
 			GAS_LIMIT,
 			GAS_LIMIT * 2,
 			GAS_LIMIT / 5,
-			Weight::from_parts(0, GAS_LIMIT.proof_size()),
-			Weight::from_parts(GAS_LIMIT.ref_time(), 0),
+			Weight::from_parts(u64::MAX, GAS_LIMIT.proof_size()),
+			Weight::from_parts(GAS_LIMIT.ref_time(), u64::MAX),
 		];
 
 		// This call is passed to the sub call in order to create a large `required_weight`
@@ -3453,13 +3453,13 @@ fn deposit_limit_in_nested_calls() {
 
 		// We do not remove any storage but add a storage item of 12 bytes in the caller
 		// contract. This would cost 12 + 2 = 14 Balance.
-		// The nested call doesn't get a special limit, which is set by passing 0 to it.
+		// The nested call doesn't get a special limit, which is set by passing `u64::MAX` to it.
 		// This should fail as the specified parent's limit is less than the cost: 13 <
 		// 14.
 		assert_err_ignore_postinfo!(
 			builder::call(addr_caller)
 				.storage_deposit_limit(13)
-				.data((100u32, &addr_callee, U256::from(0u64)).encode())
+				.data((100u32, &addr_callee, U256::MAX).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3467,13 +3467,13 @@ fn deposit_limit_in_nested_calls() {
 		// Now we specify the parent's limit high enough to cover the caller's storage
 		// additions. However, we use a single byte more in the callee, hence the storage
 		// deposit should be 15 Balance.
-		// The nested call doesn't get a special limit, which is set by passing 0 to it.
+		// The nested call doesn't get a special limit, which is set by passing `u64::MAX` to it.
 		// This should fail as the specified parent's limit is less than the cost: 14
 		// < 15.
 		assert_err_ignore_postinfo!(
 			builder::call(addr_caller)
 				.storage_deposit_limit(14)
-				.data((101u32, &addr_callee, U256::from(0u64)).encode())
+				.data((101u32, &addr_callee, &U256::MAX).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3495,7 +3495,7 @@ fn deposit_limit_in_nested_calls() {
 		assert_err_ignore_postinfo!(
 			builder::call(addr_caller)
 				.storage_deposit_limit(0)
-				.data((87u32, &addr_callee, U256::from(0u64)).encode())
+				.data((87u32, &addr_callee, &U256::MAX.to_little_endian()).encode())
 				.build(),
 			<Error<Test>>::StorageDepositLimitExhausted,
 		);
@@ -3551,28 +3551,24 @@ fn deposit_limit_in_nested_instantiate() {
 		//
 		// Provided the limit is set to be 1 Balance less,
 		// this call should fail on the return from the caller contract.
-		assert_err_ignore_postinfo!(
-			builder::call(addr_caller)
-				.origin(RuntimeOrigin::signed(BOB))
-				.storage_deposit_limit(callee_info_len + 2 + ED + 1)
-				.data((0u32, &code_hash_callee, U256::from(0u64)).encode())
-				.build(),
-			<Error<Test>>::StorageDepositLimitExhausted,
-		);
+		let ret = builder::bare_call(addr_caller)
+			.origin(RuntimeOrigin::signed(BOB))
+			.storage_deposit_limit(DepositLimit::Balance(callee_info_len + 2 + ED + 1))
+			.data((0u32, &code_hash_callee, &U256::MAX.to_little_endian()).encode())
+			.build_and_unwrap_result();
+		assert_return_code!(ret, RuntimeReturnCode::OutOfResources);
 		// The charges made on instantiation should be rolled back.
 		assert_eq!(<Test as Config>::Currency::free_balance(&BOB), 1_000_000);
 
 		// Now we give enough limit for the instantiation itself, but require for 1 more storage
 		// byte in the constructor. Hence +1 Balance to the limit is needed. This should fail on
 		// the return from constructor.
-		assert_err_ignore_postinfo!(
-			builder::call(addr_caller)
-				.origin(RuntimeOrigin::signed(BOB))
-				.storage_deposit_limit(callee_info_len + 2 + ED + 2)
-				.data((1u32, &code_hash_callee, U256::from(0u64)).encode())
-				.build(),
-			<Error<Test>>::StorageDepositLimitExhausted,
-		);
+		let ret = builder::bare_call(addr_caller)
+			.origin(RuntimeOrigin::signed(BOB))
+			.storage_deposit_limit(DepositLimit::Balance(callee_info_len + 2 + ED + 2))
+			.data((1u32, &code_hash_callee, U256::from(0u64)).encode())
+			.build_and_unwrap_result();
+		assert_return_code!(ret, RuntimeReturnCode::OutOfResources);
 		// The charges made on the instantiation should be rolled back.
 		assert_eq!(<Test as Config>::Currency::free_balance(&BOB), 1_000_000);
 
@@ -4856,20 +4852,18 @@ fn skip_transfer_works() {
 		);
 
 		// fails when calling from a contract when gas is specified.
-		assert_err!(
-			Pallet::<Test>::bare_eth_transact(
-				GenericTransaction {
-					from: Some(BOB_ADDR),
-					to: Some(caller_addr),
-					input: Some((0u32, &addr).encode().into()),
-					gas: Some(1u32.into()),
-					..Default::default()
-				},
-				Weight::MAX,
-				|_| 0u32
-			),
-			EthTransactError::Message(format!("insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)"))
-		);
+		assert!(Pallet::<Test>::bare_eth_transact(
+			GenericTransaction {
+				from: Some(BOB_ADDR),
+				to: Some(caller_addr),
+				input: Some((0u32, &addr).encode().into()),
+				gas: Some(1u32.into()),
+				..Default::default()
+			},
+			Weight::MAX,
+			|_| 0u32
+		)
+		.is_err(),);
 
 		// works when no gas is specified.
 		assert_ok!(Pallet::<Test>::bare_eth_transact(
diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs
index 52f79f2eb55..8529c7d9e73 100644
--- a/substrate/frame/revive/src/wasm/runtime.rs
+++ b/substrate/frame/revive/src/wasm/runtime.rs
@@ -1004,8 +1004,7 @@ impl<'a, E: Ext, M: ?Sized + Memory<E::T>> Runtime<'a, E, M> {
 		self.charge_gas(call_type.cost())?;
 
 		let callee = memory.read_h160(callee_ptr)?;
-		let deposit_limit =
-			if deposit_ptr == SENTINEL { U256::zero() } else { memory.read_u256(deposit_ptr)? };
+		let deposit_limit = memory.read_u256(deposit_ptr)?;
 
 		let input_data = if flags.contains(CallFlags::CLONE_INPUT) {
 			let input = self.input_data.as_ref().ok_or(Error::<E::T>::InputForwarded)?;
@@ -1091,8 +1090,7 @@ impl<'a, E: Ext, M: ?Sized + Memory<E::T>> Runtime<'a, E, M> {
 		salt_ptr: u32,
 	) -> Result<ReturnErrorCode, TrapReason> {
 		self.charge_gas(RuntimeCosts::Instantiate { input_data_len })?;
-		let deposit_limit: U256 =
-			if deposit_ptr == SENTINEL { U256::zero() } else { memory.read_u256(deposit_ptr)? };
+		let deposit_limit: U256 = memory.read_u256(deposit_ptr)?;
 		let value = memory.read_u256(value_ptr)?;
 		let code_hash = memory.read_h256(code_hash_ptr)?;
 		let input_data = memory.read(input_data_ptr, input_data_len)?;
diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs
index d90c0f45205..ba0a63b15c3 100644
--- a/substrate/frame/revive/uapi/src/host.rs
+++ b/substrate/frame/revive/uapi/src/host.rs
@@ -113,7 +113,7 @@ pub trait HostFn: private::Sealed {
 		callee: &[u8; 20],
 		ref_time_limit: u64,
 		proof_size_limit: u64,
-		deposit: Option<&[u8; 32]>,
+		deposit: &[u8; 32],
 		value: &[u8; 32],
 		input_data: &[u8],
 		output: Option<&mut &mut [u8]>,
@@ -202,7 +202,7 @@ pub trait HostFn: private::Sealed {
 		address: &[u8; 20],
 		ref_time_limit: u64,
 		proof_size_limit: u64,
-		deposit_limit: Option<&[u8; 32]>,
+		deposit_limit: &[u8; 32],
 		input_data: &[u8],
 		output: Option<&mut &mut [u8]>,
 	) -> Result;
@@ -318,7 +318,7 @@ pub trait HostFn: private::Sealed {
 		code_hash: &[u8; 32],
 		ref_time_limit: u64,
 		proof_size_limit: u64,
-		deposit: Option<&[u8; 32]>,
+		deposit: &[u8; 32],
 		value: &[u8; 32],
 		input: &[u8],
 		address: Option<&mut [u8; 20]>,
diff --git a/substrate/frame/revive/uapi/src/host/riscv64.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs
index c83be942a97..8c40bc9f48e 100644
--- a/substrate/frame/revive/uapi/src/host/riscv64.rs
+++ b/substrate/frame/revive/uapi/src/host/riscv64.rs
@@ -168,7 +168,7 @@ impl HostFn for HostFnImpl {
 		code_hash: &[u8; 32],
 		ref_time_limit: u64,
 		proof_size_limit: u64,
-		deposit_limit: Option<&[u8; 32]>,
+		deposit_limit: &[u8; 32],
 		value: &[u8; 32],
 		input: &[u8],
 		mut address: Option<&mut [u8; 20]>,
@@ -180,7 +180,7 @@ impl HostFn for HostFnImpl {
 			None => crate::SENTINEL as _,
 		};
 		let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output);
-		let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit);
+		let deposit_limit_ptr = deposit_limit.as_ptr();
 		let salt_ptr = ptr_or_sentinel(&salt);
 		#[repr(C)]
 		#[allow(dead_code)]
@@ -225,13 +225,13 @@ impl HostFn for HostFnImpl {
 		callee: &[u8; 20],
 		ref_time_limit: u64,
 		proof_size_limit: u64,
-		deposit_limit: Option<&[u8; 32]>,
+		deposit_limit: &[u8; 32],
 		value: &[u8; 32],
 		input: &[u8],
 		mut output: Option<&mut &mut [u8]>,
 	) -> Result {
 		let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output);
-		let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit);
+		let deposit_limit_ptr = deposit_limit.as_ptr();
 		#[repr(C)]
 		#[allow(dead_code)]
 		struct Args {
@@ -273,12 +273,12 @@ impl HostFn for HostFnImpl {
 		address: &[u8; 20],
 		ref_time_limit: u64,
 		proof_size_limit: u64,
-		deposit_limit: Option<&[u8; 32]>,
+		deposit_limit: &[u8; 32],
 		input: &[u8],
 		mut output: Option<&mut &mut [u8]>,
 	) -> Result {
 		let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output);
-		let deposit_limit_ptr = ptr_or_sentinel(&deposit_limit);
+		let deposit_limit_ptr = deposit_limit.as_ptr();
 		#[repr(C)]
 		#[allow(dead_code)]
 		struct Args {
-- 
GitLab


From d822e07d51dda41982291dc6582a8c4a34821e94 Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Wed, 15 Jan 2025 14:48:38 +0100
Subject: [PATCH 062/116] [pallet-revive] Bump asset-hub westend spec version
 (#7176)

Bump asset-hub westend spec version

---------

Co-authored-by: command-bot <>
---
 .../assets/asset-hub-westend/src/lib.rs       |   2 +-
 prdoc/pr_7176.prdoc                           |   9 +++++++++
 .../frame/revive/rpc/revive_chain.metadata    | Bin 661594 -> 661585 bytes
 3 files changed, 10 insertions(+), 1 deletion(-)
 create mode 100644 prdoc/pr_7176.prdoc

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index cfc150ce5d6..7844b0d885e 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -125,7 +125,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: alloc::borrow::Cow::Borrowed("westmint"),
 	impl_name: alloc::borrow::Cow::Borrowed("westmint"),
 	authoring_version: 1,
-	spec_version: 1_017_003,
+	spec_version: 1_017_004,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 16,
diff --git a/prdoc/pr_7176.prdoc b/prdoc/pr_7176.prdoc
new file mode 100644
index 00000000000..b78f42014af
--- /dev/null
+++ b/prdoc/pr_7176.prdoc
@@ -0,0 +1,9 @@
+title: '[pallet-revive] Bump asset-hub westend spec version'
+doc:
+- audience: Runtime Dev
+  description: Bump asset-hub westend spec version
+crates:
+- name: asset-hub-westend-runtime
+  bump: minor
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata
index 402e8c2d22b21471929e9c61acd2cc968af614cf..a03c95b4944f663225642b1678ef66aaccec3fb5 100644
GIT binary patch
delta 92
zcmcb$LF3{EjSX``7+ojN4N+-68q$6=gmL@P5T;BYM&9ZA3z<x|r!8b!V<@o4HLtj|
nC{>{(Be4WXq_(SEW&&bnAZ7t#Rv=~rVs;?r*sgM!)65kB;58&I

delta 100
zcmcb(LF3j2jSX``7(*w|4N+mvNGxtX5Ym1igmL?U5T-02M#<^g3z<w9^R~w>WLjq^
vbi_5UxU?u$p(G=*1W2SRB(_UmW&&bnAZ7t#Rv=~rVs;?r*e-pU)65kB*&QY!

-- 
GitLab


From 77c78e1561bbe5ee0ecf414312bae82396ae6d11 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 15 Jan 2025 18:50:42 +0200
Subject: [PATCH 063/116] litep2p: Provide partial results to speedup GetRecord
 queries (#7099)

This PR provides the partial results of the `GetRecord` kademlia query.

This significantly speeds up the discovery of authority records, from ~37
minutes down to ~2–3 minutes.
In contrast, libp2p discovers authority records in around ~10 minutes.

The authority discovery was slow because litep2p provided the records
only after the Kademlia query was completed. A normal Kademlia query
completes in around 40 seconds to a few minutes.
In this PR, partial records are provided as soon as they are discovered
from the network.

### Testing Done

Started a node in Kusama with `--validator` and litep2p backend.
The node discovered 996/1000 authority records in ~ 1 minute 45 seconds.

![Screenshot 2025-01-09 at 12 26
08](https://github.com/user-attachments/assets/b618bf7c-2bba-43a0-a021-4047e854c075)


### Before & After

In this image, on the left side is libp2p, in the middle litep2p without
this PR, on the right litep2p with this PR

![Screenshot 2025-01-07 at 17 57
56](https://github.com/user-attachments/assets/a8d467f7-8dc7-461c-bcff-163b94d01ae8)



Closes: https://github.com/paritytech/polkadot-sdk/issues/7077

cc @paritytech/networking

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
---
 Cargo.lock                                    |  4 +-
 Cargo.toml                                    |  2 +-
 prdoc/pr_7099.prdoc                           | 16 ++++
 .../client/network/src/litep2p/discovery.rs   | 33 +++++--
 substrate/client/network/src/litep2p/mod.rs   | 87 ++++++++-----------
 5 files changed, 79 insertions(+), 63 deletions(-)
 create mode 100644 prdoc/pr_7099.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 7725db743c4..0d71a770d38 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10446,9 +10446,9 @@ dependencies = [
 
 [[package]]
 name = "litep2p"
-version = "0.8.4"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b0fef34af8847e816003bf7fdeac5ea50b9a7a88441ac927a6166b5e812ab79"
+checksum = "6ca6ee50a125dc4fc4e9a3ae3640010796d1d07bc517a0ac715fdf0b24a0b6ac"
 dependencies = [
  "async-trait",
  "bs58",
diff --git a/Cargo.toml b/Cargo.toml
index c30a9949e85..eb99b80e16f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -850,7 +850,7 @@ linked-hash-map = { version = "0.5.4" }
 linked_hash_set = { version = "0.1.4" }
 linregress = { version = "0.5.1" }
 lite-json = { version = "0.2.0", default-features = false }
-litep2p = { version = "0.8.4", features = ["websocket"] }
+litep2p = { version = "0.9.0", features = ["websocket"] }
 log = { version = "0.4.22", default-features = false }
 macro_magic = { version = "0.5.1" }
 maplit = { version = "1.0.2" }
diff --git a/prdoc/pr_7099.prdoc b/prdoc/pr_7099.prdoc
new file mode 100644
index 00000000000..58d809f3c09
--- /dev/null
+++ b/prdoc/pr_7099.prdoc
@@ -0,0 +1,16 @@
+title: Provide partial results to speedup GetRecord queries
+
+doc:
+  - audience: Node Dev
+    description: |
+      This PR provides the partial results of the GetRecord kademlia query.
+      
+      This significantly improves the authority discovery records, from ~37 minutes to ~2/3 minutes.
+      In contrast, libp2p discovers authority records in around ~10 minutes.
+      
+      The authority discovery was slow because litep2p provided the records only after the Kademlia query was completed. A normal Kademlia query completes in around 40 seconds to a few minutes.
+      In this PR, partial records are provided as soon as they are discovered from the network.
+
+crates:
+  - name: sc-network
+    bump: patch
diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs
index b55df374f60..eb571804f30 100644
--- a/substrate/client/network/src/litep2p/discovery.rs
+++ b/substrate/client/network/src/litep2p/discovery.rs
@@ -33,8 +33,8 @@ use litep2p::{
 			identify::{Config as IdentifyConfig, IdentifyEvent},
 			kademlia::{
 				Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, ContentProvider,
-				IncomingRecordValidationMode, KademliaEvent, KademliaHandle, QueryId, Quorum,
-				Record, RecordKey, RecordsType,
+				IncomingRecordValidationMode, KademliaEvent, KademliaHandle, PeerRecord, QueryId,
+				Quorum, Record, RecordKey,
 			},
 			ping::{Config as PingConfig, PingEvent},
 		},
@@ -129,13 +129,19 @@ pub enum DiscoveryEvent {
 		address: Multiaddr,
 	},
 
-	/// Record was found from the DHT.
+	/// `GetRecord` query succeeded.
 	GetRecordSuccess {
 		/// Query ID.
 		query_id: QueryId,
+	},
 
-		/// Records.
-		records: RecordsType,
+	/// Record was found from the DHT.
+	GetRecordPartialResult {
+		/// Query ID.
+		query_id: QueryId,
+
+		/// Record.
+		record: PeerRecord,
 	},
 
 	/// Record was successfully stored on the DHT.
@@ -573,13 +579,24 @@ impl Stream for Discovery {
 					peers: peers.into_iter().collect(),
 				}))
 			},
-			Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, records })) => {
+			Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id })) => {
 				log::trace!(
 					target: LOG_TARGET,
-					"`GET_RECORD` succeeded for {query_id:?}: {records:?}",
+					"`GET_RECORD` succeeded for {query_id:?}",
 				);
 
-				return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, records }));
+				return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id }));
+			},
+			Poll::Ready(Some(KademliaEvent::GetRecordPartialResult { query_id, record })) => {
+				log::trace!(
+					target: LOG_TARGET,
+					"`GET_RECORD` intermediary succeeded for {query_id:?}: {record:?}",
+				);
+
+				return Poll::Ready(Some(DiscoveryEvent::GetRecordPartialResult {
+					query_id,
+					record,
+				}));
 			},
 			Poll::Ready(Some(KademliaEvent::PutRecordSuccess { query_id, key: _ })) =>
 				return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })),
diff --git a/substrate/client/network/src/litep2p/mod.rs b/substrate/client/network/src/litep2p/mod.rs
index 52b2970525d..fc4cce47628 100644
--- a/substrate/client/network/src/litep2p/mod.rs
+++ b/substrate/client/network/src/litep2p/mod.rs
@@ -58,7 +58,7 @@ use litep2p::{
 	protocol::{
 		libp2p::{
 			bitswap::Config as BitswapConfig,
-			kademlia::{QueryId, Record, RecordsType},
+			kademlia::{QueryId, Record},
 		},
 		request_response::ConfigBuilder as RequestResponseConfigBuilder,
 	},
@@ -836,23 +836,45 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkBackend<B, H> for Litep2pNetworkBac
 							self.peerstore_handle.add_known_peer(peer.into());
 						}
 					}
-					Some(DiscoveryEvent::GetRecordSuccess { query_id, records }) => {
+					Some(DiscoveryEvent::GetRecordPartialResult { query_id, record }) => {
+						if !self.pending_queries.contains_key(&query_id) {
+							log::error!(
+								target: LOG_TARGET,
+								"Missing/invalid pending query for `GET_VALUE` partial result: {query_id:?}"
+							);
+
+							continue
+						}
+
+						let peer_id: sc_network_types::PeerId = record.peer.into();
+						let record = PeerRecord {
+							record: P2PRecord {
+								key: record.record.key.to_vec().into(),
+								value: record.record.value,
+								publisher: record.record.publisher.map(|peer_id| {
+									let peer_id: sc_network_types::PeerId = peer_id.into();
+									peer_id.into()
+								}),
+								expires: record.record.expires,
+							},
+							peer: Some(peer_id.into()),
+						};
+
+						self.event_streams.send(
+							Event::Dht(
+								DhtEvent::ValueFound(
+									record.into()
+								)
+							)
+						);
+					}
+					Some(DiscoveryEvent::GetRecordSuccess { query_id }) => {
 						match self.pending_queries.remove(&query_id) {
 							Some(KadQuery::GetValue(key, started)) => {
 								log::trace!(
 									target: LOG_TARGET,
-									"`GET_VALUE` for {:?} ({query_id:?}) succeeded",
-									key,
+									"`GET_VALUE` for {key:?} ({query_id:?}) succeeded",
 								);
-								for record in litep2p_to_libp2p_peer_record(records) {
-									self.event_streams.send(
-										Event::Dht(
-											DhtEvent::ValueFound(
-												record.into()
-											)
-										)
-									);
-								}
 
 								if let Some(ref metrics) = self.metrics {
 									metrics
@@ -1165,42 +1187,3 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkBackend<B, H> for Litep2pNetworkBac
 		}
 	}
 }
-
-// Glue code to convert from a litep2p records type to a libp2p2 PeerRecord.
-fn litep2p_to_libp2p_peer_record(records: RecordsType) -> Vec<PeerRecord> {
-	match records {
-		litep2p::protocol::libp2p::kademlia::RecordsType::LocalStore(record) => {
-			vec![PeerRecord {
-				record: P2PRecord {
-					key: record.key.to_vec().into(),
-					value: record.value,
-					publisher: record.publisher.map(|peer_id| {
-						let peer_id: sc_network_types::PeerId = peer_id.into();
-						peer_id.into()
-					}),
-					expires: record.expires,
-				},
-				peer: None,
-			}]
-		},
-		litep2p::protocol::libp2p::kademlia::RecordsType::Network(records) => records
-			.into_iter()
-			.map(|record| {
-				let peer_id: sc_network_types::PeerId = record.peer.into();
-
-				PeerRecord {
-					record: P2PRecord {
-						key: record.record.key.to_vec().into(),
-						value: record.record.value,
-						publisher: record.record.publisher.map(|peer_id| {
-							let peer_id: sc_network_types::PeerId = peer_id.into();
-							peer_id.into()
-						}),
-						expires: record.record.expires,
-					},
-					peer: Some(peer_id.into()),
-				}
-			})
-			.collect::<Vec<_>>(),
-	}
-}
-- 
GitLab


From ece32e38a1a37aa354d51b16c07a42c66f23976e Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Wed, 15 Jan 2025 18:37:59 +0100
Subject: [PATCH 064/116] [pallet-revive] Remove debug buffer (#7163)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Remove the `debug_buffer` feature

---------

Co-authored-by: command-bot <>
Co-authored-by: Cyrill Leutwiler <cyrill@parity.io>
Co-authored-by: Alexander Theißen <alex.theissen@me.com>
---
 .../assets/asset-hub-westend/src/lib.rs       |  15 +-
 prdoc/pr_7163.prdoc                           |  13 +
 substrate/bin/node/runtime/src/lib.rs         |  10 +-
 substrate/frame/revive/README.md              |  23 --
 .../contracts/debug_message_invalid_utf8.rs   |  33 ---
 .../debug_message_logging_disabled.rs         |  33 ---
 .../fixtures/contracts/debug_message_works.rs |  33 ---
 substrate/frame/revive/proc-macro/src/lib.rs  |   7 +-
 .../revive/src/benchmarking/call_builder.rs   |  15 +-
 .../frame/revive/src/benchmarking/mod.rs      |  28 ---
 substrate/frame/revive/src/exec.rs            | 228 +-----------------
 substrate/frame/revive/src/lib.rs             |  67 +----
 substrate/frame/revive/src/limits.rs          |   5 -
 substrate/frame/revive/src/primitives.rs      |  53 +---
 .../frame/revive/src/test_utils/builder.rs    |  17 +-
 substrate/frame/revive/src/tests.rs           | 146 +----------
 .../frame/revive/src/tests/test_debug.rs      |   4 -
 substrate/frame/revive/src/wasm/mod.rs        |   2 +-
 substrate/frame/revive/src/wasm/runtime.rs    |  34 +--
 substrate/frame/revive/src/weights.rs         |  21 --
 substrate/frame/revive/uapi/src/host.rs       |  20 --
 .../frame/revive/uapi/src/host/riscv64.rs     |   7 -
 substrate/frame/revive/uapi/src/lib.rs        |   7 +-
 23 files changed, 54 insertions(+), 767 deletions(-)
 create mode 100644 prdoc/pr_7163.prdoc
 delete mode 100644 substrate/frame/revive/fixtures/contracts/debug_message_invalid_utf8.rs
 delete mode 100644 substrate/frame/revive/fixtures/contracts/debug_message_logging_disabled.rs
 delete mode 100644 substrate/frame/revive/fixtures/contracts/debug_message_works.rs

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 7844b0d885e..5966dd01f18 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -952,11 +952,6 @@ parameter_types! {
 	pub CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(30);
 }
 
-type EventRecord = frame_system::EventRecord<
-	<Runtime as frame_system::Config>::RuntimeEvent,
-	<Runtime as frame_system::Config>::Hash,
->;
-
 impl pallet_revive::Config for Runtime {
 	type Time = Timestamp;
 	type Currency = Balances;
@@ -2073,7 +2068,7 @@ impl_runtime_apis! {
 		}
 	}
 
-	impl pallet_revive::ReviveApi<Block, AccountId, Balance, Nonce, BlockNumber, EventRecord> for Runtime
+	impl pallet_revive::ReviveApi<Block, AccountId, Balance, Nonce, BlockNumber> for Runtime
 	{
 		fn balance(address: H160) -> U256 {
 			Revive::evm_balance(&address)
@@ -2108,7 +2103,7 @@ impl_runtime_apis! {
 			gas_limit: Option<Weight>,
 			storage_deposit_limit: Option<Balance>,
 			input_data: Vec<u8>,
-		) -> pallet_revive::ContractResult<pallet_revive::ExecReturnValue, Balance, EventRecord> {
+		) -> pallet_revive::ContractResult<pallet_revive::ExecReturnValue, Balance> {
 			let blockweights= <Runtime as frame_system::Config>::BlockWeights::get();
 			Revive::bare_call(
 				RuntimeOrigin::signed(origin),
@@ -2117,8 +2112,6 @@ impl_runtime_apis! {
 				gas_limit.unwrap_or(blockweights.max_block),
 				pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)),
 				input_data,
-				pallet_revive::DebugInfo::UnsafeDebug,
-				pallet_revive::CollectEvents::UnsafeCollect,
 			)
 		}
 
@@ -2130,7 +2123,7 @@ impl_runtime_apis! {
 			code: pallet_revive::Code,
 			data: Vec<u8>,
 			salt: Option<[u8; 32]>,
-		) -> pallet_revive::ContractResult<pallet_revive::InstantiateReturnValue, Balance, EventRecord>
+		) -> pallet_revive::ContractResult<pallet_revive::InstantiateReturnValue, Balance>
 		{
 			let blockweights= <Runtime as frame_system::Config>::BlockWeights::get();
 			Revive::bare_instantiate(
@@ -2141,8 +2134,6 @@ impl_runtime_apis! {
 				code,
 				data,
 				salt,
-				pallet_revive::DebugInfo::UnsafeDebug,
-				pallet_revive::CollectEvents::UnsafeCollect,
 			)
 		}
 
diff --git a/prdoc/pr_7163.prdoc b/prdoc/pr_7163.prdoc
new file mode 100644
index 00000000000..669c480b835
--- /dev/null
+++ b/prdoc/pr_7163.prdoc
@@ -0,0 +1,13 @@
+title: '[pallet-revive] Remove debug buffer'
+doc:
+- audience: Runtime Dev
+  description: Remove the `debug_buffer` feature
+crates:
+- name: asset-hub-westend-runtime
+  bump: minor
+- name: pallet-revive
+  bump: major
+- name: pallet-revive-proc-macro
+  bump: minor
+- name: pallet-revive-uapi
+  bump: minor
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index e11a009c1c3..97728f12f5f 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -3212,7 +3212,7 @@ impl_runtime_apis! {
 		}
 	}
 
-	impl pallet_revive::ReviveApi<Block, AccountId, Balance, Nonce, BlockNumber, EventRecord> for Runtime
+	impl pallet_revive::ReviveApi<Block, AccountId, Balance, Nonce, BlockNumber> for Runtime
 	{
 		fn balance(address: H160) -> U256 {
 			Revive::evm_balance(&address)
@@ -3247,7 +3247,7 @@ impl_runtime_apis! {
 			gas_limit: Option<Weight>,
 			storage_deposit_limit: Option<Balance>,
 			input_data: Vec<u8>,
-		) -> pallet_revive::ContractResult<pallet_revive::ExecReturnValue, Balance, EventRecord> {
+		) -> pallet_revive::ContractResult<pallet_revive::ExecReturnValue, Balance> {
 			Revive::bare_call(
 				RuntimeOrigin::signed(origin),
 				dest,
@@ -3255,8 +3255,6 @@ impl_runtime_apis! {
 				gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block),
 				pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)),
 				input_data,
-				pallet_revive::DebugInfo::UnsafeDebug,
-				pallet_revive::CollectEvents::UnsafeCollect,
 			)
 		}
 
@@ -3268,7 +3266,7 @@ impl_runtime_apis! {
 			code: pallet_revive::Code,
 			data: Vec<u8>,
 			salt: Option<[u8; 32]>,
-		) -> pallet_revive::ContractResult<pallet_revive::InstantiateReturnValue, Balance, EventRecord>
+		) -> pallet_revive::ContractResult<pallet_revive::InstantiateReturnValue, Balance>
 		{
 			Revive::bare_instantiate(
 				RuntimeOrigin::signed(origin),
@@ -3278,8 +3276,6 @@ impl_runtime_apis! {
 				code,
 				data,
 				salt,
-				pallet_revive::DebugInfo::UnsafeDebug,
-				pallet_revive::CollectEvents::UnsafeCollect,
 			)
 		}
 
diff --git a/substrate/frame/revive/README.md b/substrate/frame/revive/README.md
index 575920dfaac..7538f77d10b 100644
--- a/substrate/frame/revive/README.md
+++ b/substrate/frame/revive/README.md
@@ -49,29 +49,6 @@ This module executes PolkaVM smart contracts. These can potentially be written i
 RISC-V. For now, the only officially supported languages are Solidity (via [`revive`](https://github.com/xermicus/revive))
 and Rust (check the `fixtures` directory for Rust examples).
 
-## Debugging
-
-Contracts can emit messages to the client when called as RPC through the
-[`debug_message`](https://paritytech.github.io/substrate/master/pallet_revive/trait.SyscallDocs.html#tymethod.debug_message)
-API.
-
-Those messages are gathered into an internal buffer and sent to the RPC client. It is up to the individual client if
-and how those messages are presented to the user.
-
-This buffer is also printed as a debug message. In order to see these messages on the node console the log level for the
-`runtime::revive` target needs to be raised to at least the `debug` level. However, those messages are easy to
-overlook because of the noise generated by block production. A good starting point for observing them on the console is
-using this command line in the root directory of the Substrate repository:
-
-```bash
-cargo run --release -- --dev -lerror,runtime::revive=debug
-```
-
-This raises the log level of `runtime::revive` to `debug` and all other targets to `error` in order to prevent them
-from spamming the console.
-
-`--dev`: Use a dev chain spec `--tmp`: Use temporary storage for chain data (the chain state is deleted on exit)
-
 ## Host function tracing
 
 For contract authors, it can be a helpful debugging tool to see which host functions are called, with which arguments,
diff --git a/substrate/frame/revive/fixtures/contracts/debug_message_invalid_utf8.rs b/substrate/frame/revive/fixtures/contracts/debug_message_invalid_utf8.rs
deleted file mode 100644
index 6c850a9ec66..00000000000
--- a/substrate/frame/revive/fixtures/contracts/debug_message_invalid_utf8.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Emit a debug message with an invalid utf-8 code.
-#![no_std]
-#![no_main]
-
-extern crate common;
-use uapi::{HostFn, HostFnImpl as api};
-
-#[no_mangle]
-#[polkavm_derive::polkavm_export]
-pub extern "C" fn deploy() {}
-
-#[no_mangle]
-#[polkavm_derive::polkavm_export]
-pub extern "C" fn call() {
-	api::debug_message(b"\xFC").unwrap();
-}
diff --git a/substrate/frame/revive/fixtures/contracts/debug_message_logging_disabled.rs b/substrate/frame/revive/fixtures/contracts/debug_message_logging_disabled.rs
deleted file mode 100644
index 0ce2b6b5628..00000000000
--- a/substrate/frame/revive/fixtures/contracts/debug_message_logging_disabled.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Emit a "Hello World!" debug message but assume that logging is disabled.
-#![no_std]
-#![no_main]
-
-extern crate common;
-use uapi::{HostFn, HostFnImpl as api, ReturnErrorCode};
-
-#[no_mangle]
-#[polkavm_derive::polkavm_export]
-pub extern "C" fn deploy() {}
-
-#[no_mangle]
-#[polkavm_derive::polkavm_export]
-pub extern "C" fn call() {
-	assert_eq!(api::debug_message(b"Hello World!"), Err(ReturnErrorCode::LoggingDisabled));
-}
diff --git a/substrate/frame/revive/fixtures/contracts/debug_message_works.rs b/substrate/frame/revive/fixtures/contracts/debug_message_works.rs
deleted file mode 100644
index 3a2509509d8..00000000000
--- a/substrate/frame/revive/fixtures/contracts/debug_message_works.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Emit a "Hello World!" debug message.
-#![no_std]
-#![no_main]
-
-extern crate common;
-use uapi::{HostFn, HostFnImpl as api};
-
-#[no_mangle]
-#[polkavm_derive::polkavm_export]
-pub extern "C" fn deploy() {}
-
-#[no_mangle]
-#[polkavm_derive::polkavm_export]
-pub extern "C" fn call() {
-	api::debug_message(b"Hello World!").unwrap();
-}
diff --git a/substrate/frame/revive/proc-macro/src/lib.rs b/substrate/frame/revive/proc-macro/src/lib.rs
index b09bdef1463..6e38063d20a 100644
--- a/substrate/frame/revive/proc-macro/src/lib.rs
+++ b/substrate/frame/revive/proc-macro/src/lib.rs
@@ -510,12 +510,7 @@ fn expand_functions(def: &EnvDef) -> TokenStream2 {
 			quote! {
 				// wrap body in closure to make sure the tracing is always executed
 				let result = (|| #body)();
-				if ::log::log_enabled!(target: "runtime::revive::strace", ::log::Level::Trace) {
-						use core::fmt::Write;
-						let mut msg = alloc::string::String::default();
-						let _ = core::write!(&mut msg, #trace_fmt_str, #( #trace_fmt_args, )* result);
-						self.ext().append_debug_buffer(&msg);
-				}
+				::log::trace!(target: "runtime::revive::strace", #trace_fmt_str, #( #trace_fmt_args, )* result);
 				result
 			}
 		};
diff --git a/substrate/frame/revive/src/benchmarking/call_builder.rs b/substrate/frame/revive/src/benchmarking/call_builder.rs
index 1177d47aadc..077e18ff5f0 100644
--- a/substrate/frame/revive/src/benchmarking/call_builder.rs
+++ b/substrate/frame/revive/src/benchmarking/call_builder.rs
@@ -22,7 +22,7 @@ use crate::{
 	storage::meter::Meter,
 	transient_storage::MeterEntry,
 	wasm::{PreparedCall, Runtime},
-	BalanceOf, Config, DebugBuffer, Error, GasMeter, MomentOf, Origin, WasmBlob, Weight,
+	BalanceOf, Config, Error, GasMeter, MomentOf, Origin, WasmBlob, Weight,
 };
 use alloc::{vec, vec::Vec};
 use frame_benchmarking::benchmarking;
@@ -38,7 +38,6 @@ pub struct CallSetup<T: Config> {
 	gas_meter: GasMeter<T>,
 	storage_meter: Meter<T>,
 	value: BalanceOf<T>,
-	debug_message: Option<DebugBuffer>,
 	data: Vec<u8>,
 	transient_storage_size: u32,
 }
@@ -91,7 +90,6 @@ where
 			gas_meter: GasMeter::new(Weight::MAX),
 			storage_meter,
 			value: 0u32.into(),
-			debug_message: None,
 			data: vec![],
 			transient_storage_size: 0,
 		}
@@ -122,16 +120,6 @@ where
 		self.transient_storage_size = size;
 	}
 
-	/// Set the debug message.
-	pub fn enable_debug_message(&mut self) {
-		self.debug_message = Some(Default::default());
-	}
-
-	/// Get the debug message.
-	pub fn debug_message(&self) -> Option<DebugBuffer> {
-		self.debug_message.clone()
-	}
-
 	/// Get the call's input data.
 	pub fn data(&self) -> Vec<u8> {
 		self.data.clone()
@@ -150,7 +138,6 @@ where
 			&mut self.gas_meter,
 			&mut self.storage_meter,
 			self.value,
-			self.debug_message.as_mut(),
 		);
 		if self.transient_storage_size > 0 {
 			Self::with_transient_storage(&mut ext.0, self.transient_storage_size).unwrap();
diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs
index 1796348ff32..e23554f21ba 100644
--- a/substrate/frame/revive/src/benchmarking/mod.rs
+++ b/substrate/frame/revive/src/benchmarking/mod.rs
@@ -107,8 +107,6 @@ where
 			Code::Upload(module.code),
 			data,
 			salt,
-			DebugInfo::Skip,
-			CollectEvents::Skip,
 		);
 
 		let address = outcome.result?.addr;
@@ -1047,32 +1045,6 @@ mod benchmarks {
 		);
 	}
 
-	// Benchmark debug_message call
-	// Whereas this function is used in RPC mode only, it still should be secured
-	// against an excessive use.
-	//
-	// i: size of input in bytes up to maximum allowed contract memory or maximum allowed debug
-	// buffer size, whichever is less.
-	#[benchmark]
-	fn seal_debug_message(
-		i: Linear<0, { (limits::code::BLOB_BYTES).min(limits::DEBUG_BUFFER_BYTES) }>,
-	) {
-		let mut setup = CallSetup::<T>::default();
-		setup.enable_debug_message();
-		let (mut ext, _) = setup.ext();
-		let mut runtime = crate::wasm::Runtime::<_, [u8]>::new(&mut ext, vec![]);
-		// Fill memory with printable ASCII bytes.
-		let mut memory = (0..i).zip((32..127).cycle()).map(|i| i.1).collect::<Vec<_>>();
-
-		let result;
-		#[block]
-		{
-			result = runtime.bench_debug_message(memory.as_mut_slice(), 0, i);
-		}
-		assert_ok!(result);
-		assert_eq!(setup.debug_message().unwrap().len() as u32, i);
-	}
-
 	#[benchmark(skip_meta, pov_mode = Measured)]
 	fn get_storage_empty() -> Result<(), BenchmarkError> {
 		let max_key_len = limits::STORAGE_KEY_BYTES;
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index c069216d6cc..e20c5dd7786 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -24,8 +24,8 @@ use crate::{
 	runtime_decl_for_revive_api::{Decode, Encode, RuntimeDebugNoBound, TypeInfo},
 	storage::{self, meter::Diff, WriteOutcome},
 	transient_storage::TransientStorage,
-	BalanceOf, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, DebugBuffer, Error,
-	Event, ImmutableData, ImmutableDataOf, Pallet as Contracts, LOG_TARGET,
+	BalanceOf, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, Error, Event,
+	ImmutableData, ImmutableDataOf, Pallet as Contracts,
 };
 use alloc::vec::Vec;
 use core::{fmt::Debug, marker::PhantomData, mem};
@@ -378,19 +378,6 @@ pub trait Ext: sealing::Sealed {
 	/// Charges `diff` from the meter.
 	fn charge_storage(&mut self, diff: &Diff);
 
-	/// Append a string to the debug buffer.
-	///
-	/// It is added as-is without any additional new line.
-	///
-	/// This is a no-op if debug message recording is disabled which is always the case
-	/// when the code is executing on-chain.
-	///
-	/// Returns `true` if debug message recording is enabled. Otherwise `false` is returned.
-	fn append_debug_buffer(&mut self, msg: &str) -> bool;
-
-	/// Returns `true` if debug message recording is enabled. Otherwise `false` is returned.
-	fn debug_buffer_enabled(&self) -> bool;
-
 	/// Call some dispatchable and return the result.
 	fn call_runtime(&self, call: <Self::T as Config>::RuntimeCall) -> DispatchResultWithPostInfo;
 
@@ -555,11 +542,6 @@ pub struct Stack<'a, T: Config, E> {
 	frames: BoundedVec<Frame<T>, ConstU32<{ limits::CALL_STACK_DEPTH }>>,
 	/// Statically guarantee that each call stack has at least one frame.
 	first_frame: Frame<T>,
-	/// A text buffer used to output human readable information.
-	///
-	/// All the bytes added to this field should be valid UTF-8. The buffer has no defined
-	/// structure and is intended to be shown to users as-is for debugging purposes.
-	debug_message: Option<&'a mut DebugBuffer>,
 	/// Transient storage used to store data, which is kept for the duration of a transaction.
 	transient_storage: TransientStorage<T>,
 	/// Whether or not actual transfer of funds should be performed.
@@ -765,11 +747,6 @@ where
 {
 	/// Create and run a new call stack by calling into `dest`.
 	///
-	/// # Note
-	///
-	/// `debug_message` should only ever be set to `Some` when executing as an RPC because
-	/// it adds allocations and could be abused to drive the runtime into an OOM panic.
-	///
 	/// # Return Value
 	///
 	/// Result<(ExecReturnValue, CodeSize), (ExecError, CodeSize)>
@@ -781,7 +758,6 @@ where
 		value: U256,
 		input_data: Vec<u8>,
 		skip_transfer: bool,
-		debug_message: Option<&'a mut DebugBuffer>,
 	) -> ExecResult {
 		let dest = T::AddressMapper::to_account_id(&dest);
 		if let Some((mut stack, executable)) = Self::new(
@@ -791,7 +767,6 @@ where
 			storage_meter,
 			value,
 			skip_transfer,
-			debug_message,
 		)? {
 			stack.run(executable, input_data).map(|_| stack.first_frame.last_frame_output)
 		} else {
@@ -801,11 +776,6 @@ where
 
 	/// Create and run a new call stack by instantiating a new contract.
 	///
-	/// # Note
-	///
-	/// `debug_message` should only ever be set to `Some` when executing as an RPC because
-	/// it adds allocations and could be abused to drive the runtime into an OOM panic.
-	///
 	/// # Return Value
 	///
 	/// Result<(NewContractAccountId, ExecReturnValue), ExecError)>
@@ -818,7 +788,6 @@ where
 		input_data: Vec<u8>,
 		salt: Option<&[u8; 32]>,
 		skip_transfer: bool,
-		debug_message: Option<&'a mut DebugBuffer>,
 	) -> Result<(H160, ExecReturnValue), ExecError> {
 		let (mut stack, executable) = Self::new(
 			FrameArgs::Instantiate {
@@ -832,7 +801,6 @@ where
 			storage_meter,
 			value,
 			skip_transfer,
-			debug_message,
 		)?
 		.expect(FRAME_ALWAYS_EXISTS_ON_INSTANTIATE);
 		let address = T::AddressMapper::to_address(&stack.top_frame().account_id);
@@ -848,7 +816,6 @@ where
 		gas_meter: &'a mut GasMeter<T>,
 		storage_meter: &'a mut storage::meter::Meter<T>,
 		value: BalanceOf<T>,
-		debug_message: Option<&'a mut DebugBuffer>,
 	) -> (Self, E) {
 		Self::new(
 			FrameArgs::Call {
@@ -861,7 +828,6 @@ where
 			storage_meter,
 			value.into(),
 			false,
-			debug_message,
 		)
 		.unwrap()
 		.unwrap()
@@ -878,7 +844,6 @@ where
 		storage_meter: &'a mut storage::meter::Meter<T>,
 		value: U256,
 		skip_transfer: bool,
-		debug_message: Option<&'a mut DebugBuffer>,
 	) -> Result<Option<(Self, E)>, ExecError> {
 		origin.ensure_mapped()?;
 		let Some((first_frame, executable)) = Self::new_frame(
@@ -903,7 +868,6 @@ where
 			block_number: <frame_system::Pallet<T>>::block_number(),
 			first_frame,
 			frames: Default::default(),
-			debug_message,
 			transient_storage: TransientStorage::new(limits::TRANSIENT_STORAGE_BYTES),
 			skip_transfer,
 			_phantom: Default::default(),
@@ -1250,13 +1214,6 @@ where
 				}
 			}
 		} else {
-			if let Some((msg, false)) = self.debug_message.as_ref().map(|m| (m, m.is_empty())) {
-				log::debug!(
-					target: LOG_TARGET,
-					"Execution finished with debug buffer: {}",
-					core::str::from_utf8(msg).unwrap_or("<Invalid UTF8>"),
-				);
-			}
 			self.gas_meter.absorb_nested(mem::take(&mut self.first_frame.nested_gas));
 			if !persist {
 				return;
@@ -1759,28 +1716,6 @@ where
 		self.top_frame_mut().nested_storage.charge(diff)
 	}
 
-	fn debug_buffer_enabled(&self) -> bool {
-		self.debug_message.is_some()
-	}
-
-	fn append_debug_buffer(&mut self, msg: &str) -> bool {
-		if let Some(buffer) = &mut self.debug_message {
-			buffer
-				.try_extend(&mut msg.bytes())
-				.map_err(|_| {
-					log::debug!(
-						target: LOG_TARGET,
-						"Debug buffer (of {} bytes) exhausted!",
-						limits::DEBUG_BUFFER_BYTES,
-					)
-				})
-				.ok();
-			true
-		} else {
-			false
-		}
-	}
-
 	fn call_runtime(&self, call: <Self::T as Config>::RuntimeCall) -> DispatchResultWithPostInfo {
 		let mut origin: T::RuntimeOrigin = RawOrigin::Signed(self.account_id().clone()).into();
 		origin.add_filter(T::CallFilter::contains);
@@ -2103,7 +2038,6 @@ mod tests {
 					value.into(),
 					vec![],
 					false,
-					None,
 				),
 				Ok(_)
 			);
@@ -2196,7 +2130,6 @@ mod tests {
 				value.into(),
 				vec![],
 				false,
-				None,
 			)
 			.unwrap();
 
@@ -2237,7 +2170,6 @@ mod tests {
 				value.into(),
 				vec![],
 				false,
-				None,
 			));
 
 			assert_eq!(get_balance(&ALICE), 100 - value);
@@ -2274,7 +2206,6 @@ mod tests {
 					U256::zero(),
 					vec![],
 					false,
-					None,
 				),
 				ExecError {
 					error: Error::<Test>::CodeNotFound.into(),
@@ -2292,7 +2223,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -2321,7 +2251,6 @@ mod tests {
 				55u64.into(),
 				vec![],
 				false,
-				None,
 			)
 			.unwrap();
 
@@ -2371,7 +2300,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			);
 
 			let output = result.unwrap();
@@ -2401,7 +2329,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			);
 
 			let output = result.unwrap();
@@ -2431,7 +2358,6 @@ mod tests {
 				U256::zero(),
 				vec![1, 2, 3, 4],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -2468,7 +2394,6 @@ mod tests {
 					vec![1, 2, 3, 4],
 					Some(&[0; 32]),
 					false,
-					None,
 				);
 				assert_matches!(result, Ok(_));
 			});
@@ -2523,7 +2448,6 @@ mod tests {
 				value.into(),
 				vec![],
 				false,
-				None,
 			);
 
 			assert_matches!(result, Ok(_));
@@ -2588,7 +2512,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			);
 
 			assert_matches!(result, Ok(_));
@@ -2654,7 +2577,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			);
 
 			assert_matches!(result, Ok(_));
@@ -2687,7 +2609,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -2725,7 +2646,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -2752,7 +2672,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -2797,7 +2716,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -2824,7 +2742,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -2851,7 +2768,6 @@ mod tests {
 				1u64.into(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Err(_));
 		});
@@ -2896,7 +2812,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -2942,7 +2857,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			);
 
 			assert_matches!(result, Ok(_));
@@ -2969,7 +2883,6 @@ mod tests {
 					vec![],
 					Some(&[0; 32]),
 					false,
-					None,
 				),
 				Err(_)
 			);
@@ -3005,7 +2918,6 @@ mod tests {
 						vec![],
 						Some(&[0 ;32]),
 						false,
-						None,
 					),
 					Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address
 				);
@@ -3060,7 +2972,6 @@ mod tests {
 						vec![],
 						Some(&[0; 32]),
 						false,
-						None,
 					),
 					Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address
 				);
@@ -3125,7 +3036,6 @@ mod tests {
 						(min_balance * 10).into(),
 						vec![],
 						false,
-						None,
 					),
 					Ok(_)
 				);
@@ -3206,7 +3116,6 @@ mod tests {
 						U256::zero(),
 						vec![],
 						false,
-						None,
 					),
 					Ok(_)
 				);
@@ -3250,7 +3159,6 @@ mod tests {
 						vec![],
 						Some(&[0; 32]),
 						false,
-						None,
 					),
 					Err(Error::<Test>::TerminatedInConstructor.into())
 				);
@@ -3315,7 +3223,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -3378,7 +3285,6 @@ mod tests {
 					vec![],
 					Some(&[0; 32]),
 					false,
-					None,
 				);
 				assert_matches!(result, Ok(_));
 			});
@@ -3425,113 +3331,11 @@ mod tests {
 					U256::zero(),
 					vec![],
 					false,
-					None,
 				)
 				.unwrap();
 			});
 	}
 
-	#[test]
-	fn printing_works() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			ctx.ext.append_debug_buffer("This is a test");
-			ctx.ext.append_debug_buffer("More text");
-			exec_success()
-		});
-
-		let mut debug_buffer = DebugBuffer::try_from(Vec::new()).unwrap();
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 10);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-				Some(&mut debug_buffer),
-			)
-			.unwrap();
-		});
-
-		assert_eq!(&String::from_utf8(debug_buffer.to_vec()).unwrap(), "This is a testMore text");
-	}
-
-	#[test]
-	fn printing_works_on_fail() {
-		let code_hash = MockLoader::insert(Call, |ctx, _| {
-			ctx.ext.append_debug_buffer("This is a test");
-			ctx.ext.append_debug_buffer("More text");
-			exec_trapped()
-		});
-
-		let mut debug_buffer = DebugBuffer::try_from(Vec::new()).unwrap();
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 10);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			let result = MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-				Some(&mut debug_buffer),
-			);
-			assert!(result.is_err());
-		});
-
-		assert_eq!(&String::from_utf8(debug_buffer.to_vec()).unwrap(), "This is a testMore text");
-	}
-
-	#[test]
-	fn debug_buffer_is_limited() {
-		let code_hash = MockLoader::insert(Call, move |ctx, _| {
-			ctx.ext.append_debug_buffer("overflowing bytes");
-			exec_success()
-		});
-
-		// Pre-fill the buffer almost up to its limit, leaving not enough space to the message
-		let debug_buf_before = DebugBuffer::try_from(vec![0u8; DebugBuffer::bound() - 5]).unwrap();
-		let mut debug_buf_after = debug_buf_before.clone();
-
-		ExtBuilder::default().build().execute_with(|| {
-			let min_balance = <Test as Config>::Currency::minimum_balance();
-			let mut gas_meter = GasMeter::<Test>::new(GAS_LIMIT);
-			set_balance(&ALICE, min_balance * 10);
-			place_contract(&BOB, code_hash);
-			let origin = Origin::from_account_id(ALICE);
-			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
-			MockStack::run_call(
-				origin,
-				BOB_ADDR,
-				&mut gas_meter,
-				&mut storage_meter,
-				U256::zero(),
-				vec![],
-				false,
-				Some(&mut debug_buf_after),
-			)
-			.unwrap();
-			assert_eq!(debug_buf_before, debug_buf_after);
-		});
-	}
-
 	#[test]
 	fn call_reentry_direct_recursion() {
 		// call the contract passed as input with disabled reentry
@@ -3559,7 +3363,6 @@ mod tests {
 				U256::zero(),
 				CHARLIE_ADDR.as_bytes().to_vec(),
 				false,
-				None,
 			));
 
 			// Calling into oneself fails
@@ -3572,7 +3375,6 @@ mod tests {
 					U256::zero(),
 					BOB_ADDR.as_bytes().to_vec(),
 					false,
-					None,
 				)
 				.map_err(|e| e.error),
 				<Error<Test>>::ReentranceDenied,
@@ -3623,7 +3425,6 @@ mod tests {
 					U256::zero(),
 					vec![0],
 					false,
-					None,
 				)
 				.map_err(|e| e.error),
 				<Error<Test>>::ReentranceDenied,
@@ -3658,7 +3459,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			)
 			.unwrap();
 
@@ -3743,7 +3543,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			)
 			.unwrap();
 
@@ -3870,7 +3669,6 @@ mod tests {
 					vec![],
 					Some(&[0; 32]),
 					false,
-					None,
 				)
 				.ok();
 				assert_eq!(System::account_nonce(&ALICE), 0);
@@ -3884,7 +3682,6 @@ mod tests {
 					vec![],
 					Some(&[0; 32]),
 					false,
-					None,
 				));
 				assert_eq!(System::account_nonce(&ALICE), 1);
 
@@ -3897,7 +3694,6 @@ mod tests {
 					vec![],
 					Some(&[0; 32]),
 					false,
-					None,
 				));
 				assert_eq!(System::account_nonce(&ALICE), 2);
 
@@ -3910,7 +3706,6 @@ mod tests {
 					vec![],
 					Some(&[0; 32]),
 					false,
-					None,
 				));
 				assert_eq!(System::account_nonce(&ALICE), 3);
 			});
@@ -3979,7 +3774,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -4091,7 +3885,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -4131,7 +3924,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -4171,7 +3963,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -4225,7 +4016,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -4282,7 +4072,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -4358,7 +4147,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -4429,7 +4217,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -4468,7 +4255,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			));
 		});
 	}
@@ -4531,7 +4317,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -4565,7 +4350,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -4641,7 +4425,6 @@ mod tests {
 					U256::zero(),
 					vec![],
 					false,
-					None,
 				)
 				.unwrap()
 			});
@@ -4710,7 +4493,6 @@ mod tests {
 				U256::zero(),
 				vec![0],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -4782,7 +4564,6 @@ mod tests {
 				U256::zero(),
 				vec![],
 				false,
-				None,
 			);
 			assert_matches!(result, Ok(_));
 		});
@@ -4834,7 +4615,6 @@ mod tests {
 					U256::zero(),
 					vec![],
 					false,
-					None,
 				)
 				.unwrap()
 			});
@@ -4904,7 +4684,6 @@ mod tests {
 					U256::zero(),
 					vec![],
 					false,
-					None,
 				)
 				.unwrap()
 			});
@@ -4951,7 +4730,6 @@ mod tests {
 					U256::zero(),
 					vec![],
 					false,
-					None,
 				)
 				.unwrap()
 			});
@@ -4996,7 +4774,6 @@ mod tests {
 					U256::zero(),
 					vec![],
 					false,
-					None,
 				)
 				.unwrap()
 			});
@@ -5052,7 +4829,6 @@ mod tests {
 					U256::zero(),
 					vec![0],
 					false,
-					None,
 				),
 				Ok(_)
 			);
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index bdb4b92edd9..403598ae136 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -71,7 +71,7 @@ use frame_support::{
 use frame_system::{
 	ensure_signed,
 	pallet_prelude::{BlockNumberFor, OriginFor},
-	EventRecord, Pallet as System,
+	Pallet as System,
 };
 use pallet_transaction_payment::OnChargeTransaction;
 use scale_info::TypeInfo;
@@ -98,9 +98,6 @@ type BalanceOf<T> =
 	<<T as Config>::Currency as Inspect<<T as frame_system::Config>::AccountId>>::Balance;
 type OnChargeTransactionBalanceOf<T> = <<T as pallet_transaction_payment::Config>::OnChargeTransaction as OnChargeTransaction<T>>::Balance;
 type CodeVec = BoundedVec<u8, ConstU32<{ limits::code::BLOB_BYTES }>>;
-type EventRecordOf<T> =
-	EventRecord<<T as frame_system::Config>::RuntimeEvent, <T as frame_system::Config>::Hash>;
-type DebugBuffer = BoundedVec<u8, ConstU32<{ limits::DEBUG_BUFFER_BYTES }>>;
 type ImmutableData = BoundedVec<u8, ConstU32<{ limits::IMMUTABLE_BYTES }>>;
 
 /// Used as a sentinel value when reading and writing contract memory.
@@ -258,9 +255,9 @@ pub mod pallet {
 		#[pallet::no_default_bounds]
 		type InstantiateOrigin: EnsureOrigin<Self::RuntimeOrigin, Success = Self::AccountId>;
 
-		/// For most production chains, it's recommended to use the `()` implementation of this
-		/// trait. This implementation offers additional logging when the log target
-		/// "runtime::revive" is set to trace.
+		/// Debugging utilities for contracts.
+		/// For production chains, it's recommended to use the `()` implementation of this
+		/// trait.
 		#[pallet::no_default_bounds]
 		type Debug: Debugger<Self>;
 
@@ -810,9 +807,8 @@ pub mod pallet {
 				gas_limit,
 				DepositLimit::Balance(storage_deposit_limit),
 				data,
-				DebugInfo::Skip,
-				CollectEvents::Skip,
 			);
+
 			if let Ok(return_value) = &output.result {
 				if return_value.did_revert() {
 					output.result = Err(<Error<T>>::ContractReverted.into());
@@ -848,8 +844,6 @@ pub mod pallet {
 				Code::Existing(code_hash),
 				data,
 				salt,
-				DebugInfo::Skip,
-				CollectEvents::Skip,
 			);
 			if let Ok(retval) = &output.result {
 				if retval.result.did_revert() {
@@ -914,8 +908,6 @@ pub mod pallet {
 				Code::Upload(code),
 				data,
 				salt,
-				DebugInfo::Skip,
-				CollectEvents::Skip,
 			);
 			if let Ok(retval) = &output.result {
 				if retval.result.did_revert() {
@@ -1085,16 +1077,10 @@ where
 		gas_limit: Weight,
 		storage_deposit_limit: DepositLimit<BalanceOf<T>>,
 		data: Vec<u8>,
-		debug: DebugInfo,
-		collect_events: CollectEvents,
-	) -> ContractResult<ExecReturnValue, BalanceOf<T>, EventRecordOf<T>> {
+	) -> ContractResult<ExecReturnValue, BalanceOf<T>> {
 		let mut gas_meter = GasMeter::new(gas_limit);
 		let mut storage_deposit = Default::default();
-		let mut debug_message = if matches!(debug, DebugInfo::UnsafeDebug) {
-			Some(DebugBuffer::default())
-		} else {
-			None
-		};
+
 		let try_call = || {
 			let origin = Origin::from_runtime_origin(origin)?;
 			let mut storage_meter = match storage_deposit_limit {
@@ -1109,7 +1095,6 @@ where
 				Self::convert_native_to_evm(value),
 				data,
 				storage_deposit_limit.is_unchecked(),
-				debug_message.as_mut(),
 			)?;
 			storage_deposit = storage_meter
 				.try_into_deposit(&origin, storage_deposit_limit.is_unchecked())
@@ -1119,18 +1104,11 @@ where
 			Ok(result)
 		};
 		let result = Self::run_guarded(try_call);
-		let events = if matches!(collect_events, CollectEvents::UnsafeCollect) {
-			Some(System::<T>::read_events_no_consensus().map(|e| *e).collect())
-		} else {
-			None
-		};
 		ContractResult {
 			result: result.map_err(|r| r.error),
 			gas_consumed: gas_meter.gas_consumed(),
 			gas_required: gas_meter.gas_required(),
 			storage_deposit,
-			debug_message: debug_message.unwrap_or_default().to_vec(),
-			events,
 		}
 	}
 
@@ -1138,8 +1116,7 @@ where
 	///
 	/// Identical to [`Self::instantiate`] or [`Self::instantiate_with_code`] but tailored towards
 	/// being called by other code within the runtime as opposed to from an extrinsic. It returns
-	/// more information and allows the enablement of features that are not suitable for an
-	/// extrinsic (debugging, event collection).
+	/// more information to the caller useful to estimate the cost of the operation.
 	pub fn bare_instantiate(
 		origin: OriginFor<T>,
 		value: BalanceOf<T>,
@@ -1148,14 +1125,9 @@ where
 		code: Code,
 		data: Vec<u8>,
 		salt: Option<[u8; 32]>,
-		debug: DebugInfo,
-		collect_events: CollectEvents,
-	) -> ContractResult<InstantiateReturnValue, BalanceOf<T>, EventRecordOf<T>> {
+	) -> ContractResult<InstantiateReturnValue, BalanceOf<T>> {
 		let mut gas_meter = GasMeter::new(gas_limit);
 		let mut storage_deposit = Default::default();
-		let mut debug_message =
-			if debug == DebugInfo::UnsafeDebug { Some(DebugBuffer::default()) } else { None };
-
 		let unchecked_deposit_limit = storage_deposit_limit.is_unchecked();
 		let mut storage_deposit_limit = match storage_deposit_limit {
 			DepositLimit::Balance(limit) => limit,
@@ -1195,7 +1167,6 @@ where
 				data,
 				salt.as_ref(),
 				unchecked_deposit_limit,
-				debug_message.as_mut(),
 			);
 			storage_deposit = storage_meter
 				.try_into_deposit(&instantiate_origin, unchecked_deposit_limit)?
@@ -1203,11 +1174,6 @@ where
 			result
 		};
 		let output = Self::run_guarded(try_instantiate);
-		let events = if matches!(collect_events, CollectEvents::UnsafeCollect) {
-			Some(System::<T>::read_events_no_consensus().map(|e| *e).collect())
-		} else {
-			None
-		};
 		ContractResult {
 			result: output
 				.map(|(addr, result)| InstantiateReturnValue { result, addr })
@@ -1215,8 +1181,6 @@ where
 			gas_consumed: gas_meter.gas_consumed(),
 			gas_required: gas_meter.gas_required(),
 			storage_deposit,
-			debug_message: debug_message.unwrap_or_default().to_vec(),
-			events,
 		}
 	}
 
@@ -1273,8 +1237,6 @@ where
 		};
 
 		let input = tx.input.clone().unwrap_or_default().0;
-		let debug = DebugInfo::Skip;
-		let collect_events = CollectEvents::Skip;
 
 		let extract_error = |err| {
 			if err == Error::<T>::TransferFailed.into() ||
@@ -1305,8 +1267,6 @@ where
 					gas_limit,
 					storage_deposit_limit,
 					input.clone(),
-					debug,
-					collect_events,
 				);
 
 				let data = match result.result {
@@ -1363,8 +1323,6 @@ where
 					Code::Upload(code.to_vec()),
 					data.to_vec(),
 					None,
-					debug,
-					collect_events,
 				);
 
 				let returned_data = match result.result {
@@ -1535,12 +1493,11 @@ environmental!(executing_contract: bool);
 sp_api::decl_runtime_apis! {
 	/// The API used to dry-run contract interactions.
 	#[api_version(1)]
-	pub trait ReviveApi<AccountId, Balance, Nonce, BlockNumber, EventRecord> where
+	pub trait ReviveApi<AccountId, Balance, Nonce, BlockNumber> where
 		AccountId: Codec,
 		Balance: Codec,
 		Nonce: Codec,
 		BlockNumber: Codec,
-		EventRecord: Codec,
 	{
 		/// Returns the free balance of the given `[H160]` address, using EVM decimals.
 		fn balance(address: H160) -> U256;
@@ -1558,7 +1515,7 @@ sp_api::decl_runtime_apis! {
 			gas_limit: Option<Weight>,
 			storage_deposit_limit: Option<Balance>,
 			input_data: Vec<u8>,
-		) -> ContractResult<ExecReturnValue, Balance, EventRecord>;
+		) -> ContractResult<ExecReturnValue, Balance>;
 
 		/// Instantiate a new contract.
 		///
@@ -1571,7 +1528,7 @@ sp_api::decl_runtime_apis! {
 			code: Code,
 			data: Vec<u8>,
 			salt: Option<[u8; 32]>,
-		) -> ContractResult<InstantiateReturnValue, Balance, EventRecord>;
+		) -> ContractResult<InstantiateReturnValue, Balance>;
 
 
 		/// Perform an Ethereum call.
diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs
index 3b55106c67d..f101abf0ea7 100644
--- a/substrate/frame/revive/src/limits.rs
+++ b/substrate/frame/revive/src/limits.rs
@@ -57,11 +57,6 @@ pub const TRANSIENT_STORAGE_BYTES: u32 = 4 * 1024;
 /// The maximum allowable length in bytes for (transient) storage keys.
 pub const STORAGE_KEY_BYTES: u32 = 128;
 
-/// The maximum size of the debug buffer contracts can write messages to.
-///
-/// The buffer will always be disabled for on-chain execution.
-pub const DEBUG_BUFFER_BYTES: u32 = 2 * 1024 * 1024;
-
 /// The page size in which PolkaVM should allocate memory chunks.
 pub const PAGE_SIZE: u32 = 4 * 1024;
 
diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs
index 452d2c8a306..9c149c7cc38 100644
--- a/substrate/frame/revive/src/primitives.rs
+++ b/substrate/frame/revive/src/primitives.rs
@@ -63,7 +63,7 @@ impl<T> From<T> for DepositLimit<T> {
 /// `ContractsApi` version. Therefore when SCALE decoding a `ContractResult` its trailing data
 /// should be ignored to avoid any potential compatibility issues.
 #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)]
-pub struct ContractResult<R, Balance, EventRecord> {
+pub struct ContractResult<R, Balance> {
 	/// How much weight was consumed during execution.
 	pub gas_consumed: Weight,
 	/// How much weight is required as gas limit in order to execute this call.
@@ -84,26 +84,8 @@ pub struct ContractResult<R, Balance, EventRecord> {
 	/// is `Err`. This is because on error all storage changes are rolled back including the
 	/// payment of the deposit.
 	pub storage_deposit: StorageDeposit<Balance>,
-	/// An optional debug message. This message is only filled when explicitly requested
-	/// by the code that calls into the contract. Otherwise it is empty.
-	///
-	/// The contained bytes are valid UTF-8. This is not declared as `String` because
-	/// this type is not allowed within the runtime.
-	///
-	/// Clients should not make any assumptions about the format of the buffer.
-	/// They should just display it as-is. It is **not** only a collection of log lines
-	/// provided by a contract but a formatted buffer with different sections.
-	///
-	/// # Note
-	///
-	/// The debug message is never generated during on-chain execution. It is reserved for
-	/// RPC calls.
-	pub debug_message: Vec<u8>,
 	/// The execution result of the wasm code.
 	pub result: Result<R, DispatchError>,
-	/// The events that were emitted during execution. It is an option as event collection is
-	/// optional.
-	pub events: Option<Vec<EventRecord>>,
 }
 
 /// The result of the execution of a `eth_transact` call.
@@ -284,36 +266,3 @@ where
 		}
 	}
 }
-
-/// Determines whether events should be collected during execution.
-#[derive(
-	Copy, Clone, PartialEq, Eq, RuntimeDebug, Decode, Encode, MaxEncodedLen, scale_info::TypeInfo,
-)]
-pub enum CollectEvents {
-	/// Collect events.
-	///
-	/// # Note
-	///
-	/// Events should only be collected when called off-chain, as this would otherwise
-	/// collect all the Events emitted in the block so far and put them into the PoV.
-	///
-	/// **Never** use this mode for on-chain execution.
-	UnsafeCollect,
-	/// Skip event collection.
-	Skip,
-}
-
-/// Determines whether debug messages will be collected.
-#[derive(
-	Copy, Clone, PartialEq, Eq, RuntimeDebug, Decode, Encode, MaxEncodedLen, scale_info::TypeInfo,
-)]
-pub enum DebugInfo {
-	/// Collect debug messages.
-	/// # Note
-	///
-	/// This should only ever be set to `UnsafeDebug` when executing as an RPC because
-	/// it adds allocations and could be abused to drive the runtime into an OOM panic.
-	UnsafeDebug,
-	/// Skip collection of debug messages.
-	Skip,
-}
diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs
index 8ba5e738407..7fbb5b67643 100644
--- a/substrate/frame/revive/src/test_utils/builder.rs
+++ b/substrate/frame/revive/src/test_utils/builder.rs
@@ -17,9 +17,8 @@
 
 use super::{deposit_limit, GAS_LIMIT};
 use crate::{
-	address::AddressMapper, AccountIdOf, BalanceOf, Code, CollectEvents, Config, ContractResult,
-	DebugInfo, DepositLimit, EventRecordOf, ExecReturnValue, InstantiateReturnValue, OriginFor,
-	Pallet, Weight,
+	address::AddressMapper, AccountIdOf, BalanceOf, Code, Config, ContractResult, DepositLimit,
+	ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight,
 };
 use frame_support::pallet_prelude::DispatchResultWithPostInfo;
 use paste::paste;
@@ -138,9 +137,7 @@ builder!(
 		code: Code,
 		data: Vec<u8>,
 		salt: Option<[u8; 32]>,
-		debug: DebugInfo,
-		collect_events: CollectEvents,
-	) -> ContractResult<InstantiateReturnValue, BalanceOf<T>, EventRecordOf<T>>;
+	) -> ContractResult<InstantiateReturnValue, BalanceOf<T>>;
 
 	/// Build the instantiate call and unwrap the result.
 	pub fn build_and_unwrap_result(self) -> InstantiateReturnValue {
@@ -164,8 +161,6 @@ builder!(
 			code,
 			data: vec![],
 			salt: Some([0; 32]),
-			debug: DebugInfo::UnsafeDebug,
-			collect_events: CollectEvents::Skip,
 		}
 	}
 );
@@ -201,9 +196,7 @@ builder!(
 		gas_limit: Weight,
 		storage_deposit_limit: DepositLimit<BalanceOf<T>>,
 		data: Vec<u8>,
-		debug: DebugInfo,
-		collect_events: CollectEvents,
-	) -> ContractResult<ExecReturnValue, BalanceOf<T>, EventRecordOf<T>>;
+	) -> ContractResult<ExecReturnValue, BalanceOf<T>>;
 
 	/// Build the call and unwrap the result.
 	pub fn build_and_unwrap_result(self) -> ExecReturnValue {
@@ -219,8 +212,6 @@ builder!(
 			gas_limit: GAS_LIMIT,
 			storage_deposit_limit: DepositLimit::Balance(deposit_limit::<T>()),
 			data: vec![],
-			debug: DebugInfo::UnsafeDebug,
-			collect_events: CollectEvents::Skip,
 		}
 	}
 );
diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs
index cf02d17a4d0..e2b30cf07c8 100644
--- a/substrate/frame/revive/src/tests.rs
+++ b/substrate/frame/revive/src/tests.rs
@@ -38,9 +38,9 @@ use crate::{
 	tests::test_utils::{get_contract, get_contract_checked},
 	wasm::Memory,
 	weights::WeightInfo,
-	AccountId32Mapper, BalanceOf, Code, CodeInfoOf, CollectEvents, Config, ContractInfo,
-	ContractInfoOf, DebugInfo, DeletionQueueCounter, DepositLimit, Error, EthTransactError,
-	HoldReason, Origin, Pallet, PristineCode, H160,
+	AccountId32Mapper, BalanceOf, Code, CodeInfoOf, Config, ContractInfo, ContractInfoOf,
+	DeletionQueueCounter, DepositLimit, Error, EthTransactError, HoldReason, Origin, Pallet,
+	PristineCode, H160,
 };
 
 use crate::test_utils::builder::Contract;
@@ -2184,58 +2184,6 @@ fn refcounter() {
 	});
 }
 
-#[test]
-fn debug_message_works() {
-	let (wasm, _code_hash) = compile_module("debug_message_works").unwrap();
-
-	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
-		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
-		let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm))
-			.value(30_000)
-			.build_and_unwrap_contract();
-		let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build();
-
-		assert_matches!(result.result, Ok(_));
-		assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!");
-	});
-}
-
-#[test]
-fn debug_message_logging_disabled() {
-	let (wasm, _code_hash) = compile_module("debug_message_logging_disabled").unwrap();
-
-	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
-		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
-		let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm))
-			.value(30_000)
-			.build_and_unwrap_contract();
-		// the dispatchables always run without debugging
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr,
-			0,
-			GAS_LIMIT,
-			deposit_limit::<Test>(),
-			vec![]
-		));
-	});
-}
-
-#[test]
-fn debug_message_invalid_utf8() {
-	let (wasm, _code_hash) = compile_module("debug_message_invalid_utf8").unwrap();
-
-	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
-		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
-		let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm))
-			.value(30_000)
-			.build_and_unwrap_contract();
-		let result = builder::bare_call(addr).debug(DebugInfo::UnsafeDebug).build();
-		assert_ok!(result.result);
-		assert!(result.debug_message.is_empty());
-	});
-}
-
 #[test]
 fn gas_estimation_for_subcalls() {
 	let (caller_code, _caller_hash) = compile_module("call_with_limit").unwrap();
@@ -2451,79 +2399,6 @@ fn ecdsa_recover() {
 	})
 }
 
-#[test]
-fn bare_instantiate_returns_events() {
-	let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap();
-	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
-		let min_balance = Contracts::min_balance();
-		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
-
-		let result = builder::bare_instantiate(Code::Upload(wasm))
-			.value(min_balance * 100)
-			.collect_events(CollectEvents::UnsafeCollect)
-			.build();
-
-		let events = result.events.unwrap();
-		assert!(!events.is_empty());
-		assert_eq!(events, System::events());
-	});
-}
-
-#[test]
-fn bare_instantiate_does_not_return_events() {
-	let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap();
-	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
-		let min_balance = Contracts::min_balance();
-		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
-
-		let result = builder::bare_instantiate(Code::Upload(wasm)).value(min_balance * 100).build();
-
-		let events = result.events;
-		assert!(!System::events().is_empty());
-		assert!(events.is_none());
-	});
-}
-
-#[test]
-fn bare_call_returns_events() {
-	let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap();
-	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
-		let min_balance = Contracts::min_balance();
-		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
-
-		let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm))
-			.value(min_balance * 100)
-			.build_and_unwrap_contract();
-
-		let result = builder::bare_call(addr).collect_events(CollectEvents::UnsafeCollect).build();
-
-		let events = result.events.unwrap();
-		assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success);
-		assert!(!events.is_empty());
-		assert_eq!(events, System::events());
-	});
-}
-
-#[test]
-fn bare_call_does_not_return_events() {
-	let (wasm, _code_hash) = compile_module("transfer_return_code").unwrap();
-	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
-		let min_balance = Contracts::min_balance();
-		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1000 * min_balance);
-
-		let Contract { addr, .. } = builder::bare_instantiate(Code::Upload(wasm))
-			.value(min_balance * 100)
-			.build_and_unwrap_contract();
-
-		let result = builder::bare_call(addr).build();
-
-		let events = result.events;
-		assert_return_code!(&result.result.unwrap(), RuntimeReturnCode::Success);
-		assert!(!System::events().is_empty());
-		assert!(events.is_none());
-	});
-}
-
 #[test]
 fn sr25519_verify() {
 	let (wasm, _code_hash) = compile_module("sr25519_verify").unwrap();
@@ -3327,14 +3202,11 @@ fn set_code_hash() {
 		// First call sets new code_hash and returns 1
 		let result = builder::bare_call(contract_addr)
 			.data(new_code_hash.as_ref().to_vec())
-			.debug(DebugInfo::UnsafeDebug)
 			.build_and_unwrap_result();
 		assert_return_code!(result, 1);
 
 		// Second calls new contract code that returns 2
-		let result = builder::bare_call(contract_addr)
-			.debug(DebugInfo::UnsafeDebug)
-			.build_and_unwrap_result();
+		let result = builder::bare_call(contract_addr).build_and_unwrap_result();
 		assert_return_code!(result, 2);
 
 		// Checking for the last event only
@@ -4810,7 +4682,7 @@ fn skip_transfer_works() {
 					..Default::default()
 				},
 				Weight::MAX,
-				|_| 0u32
+				|_| 0u32,
 			),
 			EthTransactError::Message(format!(
 				"insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)"
@@ -4825,7 +4697,7 @@ fn skip_transfer_works() {
 				..Default::default()
 			},
 			Weight::MAX,
-			|_| 0u32
+			|_| 0u32,
 		));
 
 		let Contract { addr, .. } =
@@ -4844,7 +4716,7 @@ fn skip_transfer_works() {
 					..Default::default()
 				},
 				Weight::MAX,
-				|_| 0u32
+				|_| 0u32,
 			),
 			EthTransactError::Message(format!(
 				"insufficient funds for gas * price + value: address {BOB_ADDR:?} have 0 (supplied gas 1)"
@@ -4869,7 +4741,7 @@ fn skip_transfer_works() {
 		assert_ok!(Pallet::<Test>::bare_eth_transact(
 			GenericTransaction { from: Some(BOB_ADDR), to: Some(addr), ..Default::default() },
 			Weight::MAX,
-			|_| 0u32
+			|_| 0u32,
 		));
 
 		// works when calling from a contract when no gas is specified.
@@ -4881,7 +4753,7 @@ fn skip_transfer_works() {
 				..Default::default()
 			},
 			Weight::MAX,
-			|_| 0u32
+			|_| 0u32,
 		));
 	});
 }
diff --git a/substrate/frame/revive/src/tests/test_debug.rs b/substrate/frame/revive/src/tests/test_debug.rs
index c9e19e52ace..b1fdb2d4744 100644
--- a/substrate/frame/revive/src/tests/test_debug.rs
+++ b/substrate/frame/revive/src/tests/test_debug.rs
@@ -119,8 +119,6 @@ fn debugging_works() {
 			Code::Upload(wasm),
 			vec![],
 			Some([0u8; 32]),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
 		)
 		.result
 		.unwrap()
@@ -204,8 +202,6 @@ fn call_interception_works() {
 			vec![],
 			// some salt to ensure that the address of this contract is unique among all tests
 			Some([0x41; 32]),
-			DebugInfo::Skip,
-			CollectEvents::Skip,
 		)
 		.result
 		.unwrap()
diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs
index 3bd4bde5679..b45d7026ba9 100644
--- a/substrate/frame/revive/src/wasm/mod.rs
+++ b/substrate/frame/revive/src/wasm/mod.rs
@@ -288,7 +288,7 @@ impl<T: Config> WasmBlob<T> {
 		}
 		let engine = polkavm::Engine::new(&config).expect(
 			"on-chain (no_std) use of interpreter is hard coded.
-				interpreter is available on all plattforms; qed",
+				interpreter is available on all platforms; qed",
 		);
 
 		let mut module_config = polkavm::ModuleConfig::new();
diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs
index 8529c7d9e73..1ff6a80840a 100644
--- a/substrate/frame/revive/src/wasm/runtime.rs
+++ b/substrate/frame/revive/src/wasm/runtime.rs
@@ -339,8 +339,6 @@ pub enum RuntimeCosts {
 	Terminate(u32),
 	/// Weight of calling `seal_deposit_event` with the given number of topics and event size.
 	DepositEvent { num_topic: u32, len: u32 },
-	/// Weight of calling `seal_debug_message` per byte of passed message.
-	DebugMessage(u32),
 	/// Weight of calling `seal_set_storage` for the given storage item sizes.
 	SetStorage { old_bytes: u32, new_bytes: u32 },
 	/// Weight of calling `seal_clear_storage` per cleared byte.
@@ -489,7 +487,6 @@ impl<T: Config> Token<T> for RuntimeCosts {
 			WeightToFee => T::WeightInfo::seal_weight_to_fee(),
 			Terminate(locked_dependencies) => T::WeightInfo::seal_terminate(locked_dependencies),
 			DepositEvent { num_topic, len } => T::WeightInfo::seal_deposit_event(num_topic, len),
-			DebugMessage(len) => T::WeightInfo::seal_debug_message(len),
 			SetStorage { new_bytes, old_bytes } => {
 				cost_storage!(write, seal_set_storage, new_bytes, old_bytes)
 			},
@@ -669,10 +666,7 @@ impl<'a, E: Ext, M: ?Sized + Memory<E::T>> Runtime<'a, E, M> {
 		match result {
 			Ok(_) => Ok(ReturnErrorCode::Success),
 			Err(e) => {
-				if self.ext.debug_buffer_enabled() {
-					self.ext.append_debug_buffer("call failed with: ");
-					self.ext.append_debug_buffer(e.into());
-				};
+				log::debug!(target: LOG_TARGET, "call failed with: {e:?}");
 				Ok(ErrorReturnCode::get())
 			},
 		}
@@ -1832,27 +1826,6 @@ pub mod env {
 		self.contains_storage(memory, flags, key_ptr, key_len)
 	}
 
-	/// Emit a custom debug message.
-	/// See [`pallet_revive_uapi::HostFn::debug_message`].
-	fn debug_message(
-		&mut self,
-		memory: &mut M,
-		str_ptr: u32,
-		str_len: u32,
-	) -> Result<ReturnErrorCode, TrapReason> {
-		let str_len = str_len.min(limits::DEBUG_BUFFER_BYTES);
-		self.charge_gas(RuntimeCosts::DebugMessage(str_len))?;
-		if self.ext.append_debug_buffer("") {
-			let data = memory.read(str_ptr, str_len)?;
-			if let Some(msg) = core::str::from_utf8(&data).ok() {
-				self.ext.append_debug_buffer(msg);
-			}
-			Ok(ReturnErrorCode::Success)
-		} else {
-			Ok(ReturnErrorCode::LoggingDisabled)
-		}
-	}
-
 	/// Recovers the ECDSA public key from the given message hash and signature.
 	/// See [`pallet_revive_uapi::HostFn::ecdsa_recover`].
 	fn ecdsa_recover(
@@ -2162,10 +2135,7 @@ pub mod env {
 				Ok(ReturnErrorCode::Success)
 			},
 			Err(e) => {
-				if self.ext.append_debug_buffer("") {
-					self.ext.append_debug_buffer("seal0::xcm_send failed with: ");
-					self.ext.append_debug_buffer(e.into());
-				};
+				log::debug!(target: LOG_TARGET, "seal0::xcm_send failed with: {e:?}");
 				Ok(ReturnErrorCode::XcmSendFailed)
 			},
 		}
diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs
index e35ba5ca076..06495d5d21a 100644
--- a/substrate/frame/revive/src/weights.rs
+++ b/substrate/frame/revive/src/weights.rs
@@ -96,7 +96,6 @@ pub trait WeightInfo {
 	fn seal_return(n: u32, ) -> Weight;
 	fn seal_terminate(n: u32, ) -> Weight;
 	fn seal_deposit_event(t: u32, n: u32, ) -> Weight;
-	fn seal_debug_message(i: u32, ) -> Weight;
 	fn get_storage_empty() -> Weight;
 	fn get_storage_full() -> Weight;
 	fn set_storage_empty() -> Weight;
@@ -643,16 +642,6 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 			// Standard Error: 34
 			.saturating_add(Weight::from_parts(774, 0).saturating_mul(n.into()))
 	}
-	/// The range of component `i` is `[0, 262144]`.
-	fn seal_debug_message(i: u32, ) -> Weight {
-		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 340_000 picoseconds.
-		Weight::from_parts(306_527, 0)
-			// Standard Error: 1
-			.saturating_add(Weight::from_parts(728, 0).saturating_mul(i.into()))
-	}
 	/// Storage: `Skipped::Metadata` (r:0 w:0)
 	/// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn get_storage_empty() -> Weight {
@@ -1539,16 +1528,6 @@ impl WeightInfo for () {
 			// Standard Error: 34
 			.saturating_add(Weight::from_parts(774, 0).saturating_mul(n.into()))
 	}
-	/// The range of component `i` is `[0, 262144]`.
-	fn seal_debug_message(i: u32, ) -> Weight {
-		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 340_000 picoseconds.
-		Weight::from_parts(306_527, 0)
-			// Standard Error: 1
-			.saturating_add(Weight::from_parts(728, 0).saturating_mul(i.into()))
-	}
 	/// Storage: `Skipped::Metadata` (r:0 w:0)
 	/// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn get_storage_empty() -> Weight {
diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs
index ba0a63b15c3..b82393826dd 100644
--- a/substrate/frame/revive/uapi/src/host.rs
+++ b/substrate/frame/revive/uapi/src/host.rs
@@ -515,26 +515,6 @@ pub trait HostFn: private::Sealed {
 	#[unstable_hostfn]
 	fn contains_storage(flags: StorageFlags, key: &[u8]) -> Option<u32>;
 
-	/// Emit a custom debug message.
-	///
-	/// No newlines are added to the supplied message.
-	/// Specifying invalid UTF-8 just drops the message with no trap.
-	///
-	/// This is a no-op if debug message recording is disabled which is always the case
-	/// when the code is executing on-chain. The message is interpreted as UTF-8 and
-	/// appended to the debug buffer which is then supplied to the calling RPC client.
-	///
-	/// # Note
-	///
-	/// Even though no action is taken when debug message recording is disabled there is still
-	/// a non trivial overhead (and weight cost) associated with calling this function. Contract
-	/// languages should remove calls to this function (either at runtime or compile time) when
-	/// not being executed as an RPC. For example, they could allow users to disable logging
-	/// through compile time flags (cargo features) for on-chain deployment. Additionally, the
-	/// return value of this function can be cached in order to prevent further calls at runtime.
-	#[unstable_hostfn]
-	fn debug_message(str: &[u8]) -> Result;
-
 	/// Recovers the ECDSA public key from the given message hash and signature.
 	///
 	/// Writes the public key into the given output buffer.
diff --git a/substrate/frame/revive/uapi/src/host/riscv64.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs
index 8c40bc9f48e..0023b8aa721 100644
--- a/substrate/frame/revive/uapi/src/host/riscv64.rs
+++ b/substrate/frame/revive/uapi/src/host/riscv64.rs
@@ -109,7 +109,6 @@ mod sys {
 			out_ptr: *mut u8,
 			out_len_ptr: *mut u32,
 		) -> ReturnCode;
-		pub fn debug_message(str_ptr: *const u8, str_len: u32) -> ReturnCode;
 		pub fn call_runtime(call_ptr: *const u8, call_len: u32) -> ReturnCode;
 		pub fn ecdsa_recover(
 			signature_ptr: *const u8,
@@ -519,12 +518,6 @@ impl HostFn for HostFnImpl {
 		ret_code.into()
 	}
 
-	#[unstable_hostfn]
-	fn debug_message(str: &[u8]) -> Result {
-		let ret_code = unsafe { sys::debug_message(str.as_ptr(), str.len() as u32) };
-		ret_code.into()
-	}
-
 	#[unstable_hostfn]
 	fn ecdsa_recover(
 		signature: &[u8; 65],
diff --git a/substrate/frame/revive/uapi/src/lib.rs b/substrate/frame/revive/uapi/src/lib.rs
index ef1798b4bf6..867f3563398 100644
--- a/substrate/frame/revive/uapi/src/lib.rs
+++ b/substrate/frame/revive/uapi/src/lib.rs
@@ -86,9 +86,8 @@ define_error_codes! {
 	/// Transfer failed for other not further specified reason. Most probably
 	/// reserved or locked balance of the sender that was preventing the transfer.
 	TransferFailed = 4,
-	/// The call to `debug_message` had no effect because debug message
-	/// recording was disabled.
-	LoggingDisabled = 5,
+	/// The subcall ran out of weight or storage deposit.
+	OutOfResources = 5,
 	/// The call dispatched by `call_runtime` was executed but returned an error.
 	CallRuntimeFailed = 6,
 	/// ECDSA public key recovery failed. Most probably wrong recovery id or signature.
@@ -99,8 +98,6 @@ define_error_codes! {
 	XcmExecutionFailed = 9,
 	/// The `xcm_send` call failed.
 	XcmSendFailed = 10,
-	/// The subcall ran out of weight or storage deposit.
-	OutOfResources = 11,
 }
 
 /// The raw return code returned by the host side.
-- 
GitLab


From 5be65872188a4ac1bf76333af3958b65f2a9629e Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Wed, 15 Jan 2025 20:23:54 +0100
Subject: [PATCH 065/116] [pallet-revive] Remove revive events (#7164)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Remove all pallet::events except for the `ContractEmitted` event that is
emitted by contracts.

---------

Co-authored-by: command-bot <>
Co-authored-by: Alexander Theißen <alex.theissen@me.com>
---
 prdoc/pr_7164.prdoc                         |   8 +
 substrate/frame/revive/src/exec.rs          | 103 +------
 substrate/frame/revive/src/lib.rs           |  72 -----
 substrate/frame/revive/src/storage/meter.rs |  16 +-
 substrate/frame/revive/src/tests.rs         | 323 +-------------------
 substrate/frame/revive/src/wasm/mod.rs      |  18 +-
 6 files changed, 40 insertions(+), 500 deletions(-)
 create mode 100644 prdoc/pr_7164.prdoc

diff --git a/prdoc/pr_7164.prdoc b/prdoc/pr_7164.prdoc
new file mode 100644
index 00000000000..cb0410a9de7
--- /dev/null
+++ b/prdoc/pr_7164.prdoc
@@ -0,0 +1,8 @@
+title: '[pallet-revive] Remove revive events'
+doc:
+- audience: Runtime Dev
+  description: Remove all pallet::events except for the `ContractEmitted` event that
+    is emitted by contracts
+crates:
+- name: pallet-revive
+  bump: major
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index e20c5dd7786..1c6ca435aef 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -1092,34 +1092,11 @@ where
 				.enforce_limit(contract)
 				.map_err(|e| ExecError { error: e, origin: ErrorOrigin::Callee })?;
 
-			let account_id = T::AddressMapper::to_address(&frame.account_id);
-			match (entry_point, delegated_code_hash) {
-				(ExportedFunction::Constructor, _) => {
-					// It is not allowed to terminate a contract inside its constructor.
-					if matches!(frame.contract_info, CachedContract::Terminated) {
-						return Err(Error::<T>::TerminatedInConstructor.into());
-					}
-
-					let caller = T::AddressMapper::to_address(self.caller().account_id()?);
-					// Deposit an instantiation event.
-					Contracts::<T>::deposit_event(Event::Instantiated {
-						deployer: caller,
-						contract: account_id,
-					});
-				},
-				(ExportedFunction::Call, Some(code_hash)) => {
-					Contracts::<T>::deposit_event(Event::DelegateCalled {
-						contract: account_id,
-						code_hash,
-					});
-				},
-				(ExportedFunction::Call, None) => {
-					let caller = self.caller();
-					Contracts::<T>::deposit_event(Event::Called {
-						caller: caller.clone(),
-						contract: account_id,
-					});
-				},
+			// It is not allowed to terminate a contract inside its constructor.
+			if entry_point == ExportedFunction::Constructor &&
+				matches!(frame.contract_info, CachedContract::Terminated)
+			{
+				return Err(Error::<T>::TerminatedInConstructor.into());
 			}
 
 			Ok(output)
@@ -1526,10 +1503,6 @@ where
 				.charge_deposit(frame.account_id.clone(), StorageDeposit::Refund(*deposit));
 		}
 
-		Contracts::<T>::deposit_event(Event::Terminated {
-			contract: account_address,
-			beneficiary: *beneficiary,
-		});
 		Ok(())
 	}
 
@@ -1782,11 +1755,6 @@ where
 
 		Self::increment_refcount(hash)?;
 		Self::decrement_refcount(prev_hash);
-		Contracts::<Self::T>::deposit_event(Event::ContractCodeUpdated {
-			contract: T::AddressMapper::to_address(&frame.account_id),
-			new_code_hash: hash,
-			old_code_hash: prev_hash,
-		});
 		Ok(())
 	}
 
@@ -2933,13 +2901,6 @@ mod tests {
 					ContractInfo::<Test>::load_code_hash(&instantiated_contract_id).unwrap(),
 					dummy_ch
 				);
-				assert_eq!(
-					&events(),
-					&[Event::Instantiated {
-						deployer: ALICE_ADDR,
-						contract: instantiated_contract_address
-					}]
-				);
 			});
 	}
 
@@ -3055,19 +3016,6 @@ mod tests {
 					ContractInfo::<Test>::load_code_hash(&instantiated_contract_id).unwrap(),
 					dummy_ch
 				);
-				assert_eq!(
-					&events(),
-					&[
-						Event::Instantiated {
-							deployer: BOB_ADDR,
-							contract: instantiated_contract_address
-						},
-						Event::Called {
-							caller: Origin::from_account_id(ALICE),
-							contract: BOB_ADDR
-						},
-					]
-				);
 			});
 	}
 
@@ -3119,13 +3067,6 @@ mod tests {
 					),
 					Ok(_)
 				);
-
-				// The contract wasn't instantiated so we don't expect to see an instantiation
-				// event here.
-				assert_eq!(
-					&events(),
-					&[Event::Called { caller: Origin::from_account_id(ALICE), contract: BOB_ADDR },]
-				);
 			});
 	}
 
@@ -3465,24 +3406,14 @@ mod tests {
 			let remark_hash = <Test as frame_system::Config>::Hashing::hash(b"Hello World");
 			assert_eq!(
 				System::events(),
-				vec![
-					EventRecord {
-						phase: Phase::Initialization,
-						event: MetaEvent::System(frame_system::Event::Remarked {
-							sender: BOB_FALLBACK,
-							hash: remark_hash
-						}),
-						topics: vec![],
-					},
-					EventRecord {
-						phase: Phase::Initialization,
-						event: MetaEvent::Contracts(crate::Event::Called {
-							caller: Origin::from_account_id(ALICE),
-							contract: BOB_ADDR,
-						}),
-						topics: vec![],
-					},
-				]
+				vec![EventRecord {
+					phase: Phase::Initialization,
+					event: MetaEvent::System(frame_system::Event::Remarked {
+						sender: BOB_FALLBACK,
+						hash: remark_hash
+					}),
+					topics: vec![],
+				},]
 			);
 		});
 	}
@@ -3571,14 +3502,6 @@ mod tests {
 						},),
 						topics: vec![],
 					},
-					EventRecord {
-						phase: Phase::Initialization,
-						event: MetaEvent::Contracts(crate::Event::Called {
-							caller: Origin::from_account_id(ALICE),
-							contract: BOB_ADDR,
-						}),
-						topics: vec![],
-					},
 				]
 			);
 		});
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index 403598ae136..a9f2842c35f 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -379,25 +379,6 @@ pub mod pallet {
 
 	#[pallet::event]
 	pub enum Event<T: Config> {
-		/// Contract deployed by address at the specified address.
-		Instantiated { deployer: H160, contract: H160 },
-
-		/// Contract has been removed.
-		///
-		/// # Note
-		///
-		/// The only way for a contract to be removed and emitting this event is by calling
-		/// `seal_terminate`.
-		Terminated {
-			/// The contract that was terminated.
-			contract: H160,
-			/// The account that received the contracts remaining balance
-			beneficiary: H160,
-		},
-
-		/// Code with the specified hash has been stored.
-		CodeStored { code_hash: H256, deposit_held: BalanceOf<T>, uploader: H160 },
-
 		/// A custom event emitted by the contract.
 		ContractEmitted {
 			/// The contract that emitted the event.
@@ -409,54 +390,6 @@ pub mod pallet {
 			/// Number of topics is capped by [`limits::NUM_EVENT_TOPICS`].
 			topics: Vec<H256>,
 		},
-
-		/// A code with the specified hash was removed.
-		CodeRemoved { code_hash: H256, deposit_released: BalanceOf<T>, remover: H160 },
-
-		/// A contract's code was updated.
-		ContractCodeUpdated {
-			/// The contract that has been updated.
-			contract: H160,
-			/// New code hash that was set for the contract.
-			new_code_hash: H256,
-			/// Previous code hash of the contract.
-			old_code_hash: H256,
-		},
-
-		/// A contract was called either by a plain account or another contract.
-		///
-		/// # Note
-		///
-		/// Please keep in mind that like all events this is only emitted for successful
-		/// calls. This is because on failure all storage changes including events are
-		/// rolled back.
-		Called {
-			/// The caller of the `contract`.
-			caller: Origin<T>,
-			/// The contract that was called.
-			contract: H160,
-		},
-
-		/// A contract delegate called a code hash.
-		///
-		/// # Note
-		///
-		/// Please keep in mind that like all events this is only emitted for successful
-		/// calls. This is because on failure all storage changes including events are
-		/// rolled back.
-		DelegateCalled {
-			/// The contract that performed the delegate call and hence in whose context
-			/// the `code_hash` is executed.
-			contract: H160,
-			/// The code hash that was delegate called.
-			code_hash: H256,
-		},
-
-		/// Some funds have been transferred and held as storage deposit.
-		StorageDepositTransferredAndHeld { from: H160, to: H160, amount: BalanceOf<T> },
-
-		/// Some storage deposit funds have been transferred and released.
-		StorageDepositTransferredAndReleased { from: H160, to: H160, amount: BalanceOf<T> },
 	}
 
 	#[pallet::error]
@@ -985,11 +918,6 @@ pub mod pallet {
 				};
 				<ExecStack<T, WasmBlob<T>>>::increment_refcount(code_hash)?;
 				<ExecStack<T, WasmBlob<T>>>::decrement_refcount(contract.code_hash);
-				Self::deposit_event(Event::ContractCodeUpdated {
-					contract: dest,
-					new_code_hash: code_hash,
-					old_code_hash: contract.code_hash,
-				});
 				contract.code_hash = code_hash;
 				Ok(())
 			})
diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs
index 4febcb0c406..cd390c86f63 100644
--- a/substrate/frame/revive/src/storage/meter.rs
+++ b/substrate/frame/revive/src/storage/meter.rs
@@ -18,8 +18,8 @@
 //! This module contains functions to meter the storage deposit.
 
 use crate::{
-	address::AddressMapper, storage::ContractInfo, AccountIdOf, BalanceOf, CodeInfo, Config, Error,
-	Event, HoldReason, Inspect, Origin, Pallet, StorageDeposit as Deposit, System, LOG_TARGET,
+	storage::ContractInfo, AccountIdOf, BalanceOf, CodeInfo, Config, Error, HoldReason, Inspect,
+	Origin, Pallet, StorageDeposit as Deposit, System, LOG_TARGET,
 };
 use alloc::vec::Vec;
 use core::{fmt::Debug, marker::PhantomData};
@@ -516,12 +516,6 @@ impl<T: Config> Ext<T> for ReservingExt {
 					Preservation::Preserve,
 					Fortitude::Polite,
 				)?;
-
-				Pallet::<T>::deposit_event(Event::StorageDepositTransferredAndHeld {
-					from: T::AddressMapper::to_address(origin),
-					to: T::AddressMapper::to_address(contract),
-					amount: *amount,
-				});
 			},
 			Deposit::Refund(amount) => {
 				let transferred = T::Currency::transfer_on_hold(
@@ -534,12 +528,6 @@ impl<T: Config> Ext<T> for ReservingExt {
 					Fortitude::Polite,
 				)?;
 
-				Pallet::<T>::deposit_event(Event::StorageDepositTransferredAndReleased {
-					from: T::AddressMapper::to_address(contract),
-					to: T::AddressMapper::to_address(origin),
-					amount: transferred,
-				});
-
 				if transferred < *amount {
 					// This should never happen, if it does it means that there is a bug in the
 					// runtime logic. In the rare case this happens we try to refund as much as we
diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs
index e2b30cf07c8..35940f544d0 100644
--- a/substrate/frame/revive/src/tests.rs
+++ b/substrate/frame/revive/src/tests.rs
@@ -713,25 +713,6 @@ fn instantiate_and_call_and_deposit_event() {
 					}),
 					topics: vec![],
 				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Instantiated {
-						deployer: ALICE_ADDR,
-						contract: addr
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(
-						pallet_revive::Event::StorageDepositTransferredAndHeld {
-							from: ALICE_ADDR,
-							to: addr,
-							amount: test_utils::contract_info_storage_deposit(&addr),
-						}
-					),
-					topics: vec![],
-				},
 			]
 		);
 	});
@@ -1078,14 +1059,6 @@ fn deploy_and_call_other_contract() {
 					}),
 					topics: vec![],
 				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Instantiated {
-						deployer: caller_addr,
-						contract: callee_addr,
-					}),
-					topics: vec![],
-				},
 				EventRecord {
 					phase: Phase::Initialization,
 					event: RuntimeEvent::Balances(pallet_balances::Event::Transfer {
@@ -1095,33 +1068,6 @@ fn deploy_and_call_other_contract() {
 					}),
 					topics: vec![],
 				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Called {
-						caller: Origin::from_account_id(caller_account.clone()),
-						contract: callee_addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Called {
-						caller: Origin::from_account_id(ALICE),
-						contract: caller_addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(
-						pallet_revive::Event::StorageDepositTransferredAndHeld {
-							from: ALICE_ADDR,
-							to: callee_addr,
-							amount: test_utils::contract_info_storage_deposit(&callee_addr),
-						}
-					),
-					topics: vec![],
-				},
 			]
 		);
 	});
@@ -1373,8 +1319,6 @@ fn self_destruct_works() {
 		// Check that the BOB contract has been instantiated.
 		let _ = get_contract(&contract.addr);
 
-		let info_deposit = test_utils::contract_info_storage_deposit(&contract.addr);
-
 		// Drop all previous events
 		initialize_block(2);
 
@@ -1404,33 +1348,6 @@ fn self_destruct_works() {
 		pretty_assertions::assert_eq!(
 			System::events(),
 			vec![
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Terminated {
-						contract: contract.addr,
-						beneficiary: DJANGO_ADDR,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Called {
-						caller: Origin::from_account_id(ALICE),
-						contract: contract.addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(
-						pallet_revive::Event::StorageDepositTransferredAndReleased {
-							from: contract.addr,
-							to: ALICE_ADDR,
-							amount: info_deposit,
-						}
-					),
-					topics: vec![],
-				},
 				EventRecord {
 					phase: Phase::Initialization,
 					event: RuntimeEvent::System(frame_system::Event::KilledAccount {
@@ -2512,23 +2429,9 @@ fn upload_code_works() {
 		initialize_block(2);
 
 		assert!(!PristineCode::<Test>::contains_key(&code_hash));
-
 		assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,));
 		// Ensure the contract was stored and get expected deposit amount to be reserved.
-		let deposit_expected = expected_deposit(ensure_stored(code_hash));
-
-		assert_eq!(
-			System::events(),
-			vec![EventRecord {
-				phase: Phase::Initialization,
-				event: RuntimeEvent::Contracts(crate::Event::CodeStored {
-					code_hash,
-					deposit_held: deposit_expected,
-					uploader: ALICE_ADDR
-				}),
-				topics: vec![],
-			},]
-		);
+		expected_deposit(ensure_stored(code_hash));
 	});
 }
 
@@ -2586,32 +2489,8 @@ fn remove_code_works() {
 
 		assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,));
 		// Ensure the contract was stored and get expected deposit amount to be reserved.
-		let deposit_expected = expected_deposit(ensure_stored(code_hash));
-
+		expected_deposit(ensure_stored(code_hash));
 		assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash));
-		assert_eq!(
-			System::events(),
-			vec![
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::CodeStored {
-						code_hash,
-						deposit_held: deposit_expected,
-						uploader: ALICE_ADDR
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::CodeRemoved {
-						code_hash,
-						deposit_released: deposit_expected,
-						remover: ALICE_ADDR
-					}),
-					topics: vec![],
-				},
-			]
-		);
 	});
 }
 
@@ -2627,25 +2506,12 @@ fn remove_code_wrong_origin() {
 
 		assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, 1_000,));
 		// Ensure the contract was stored and get expected deposit amount to be reserved.
-		let deposit_expected = expected_deposit(ensure_stored(code_hash));
+		expected_deposit(ensure_stored(code_hash));
 
 		assert_noop!(
 			Contracts::remove_code(RuntimeOrigin::signed(BOB), code_hash),
 			sp_runtime::traits::BadOrigin,
 		);
-
-		assert_eq!(
-			System::events(),
-			vec![EventRecord {
-				phase: Phase::Initialization,
-				event: RuntimeEvent::Contracts(crate::Event::CodeStored {
-					code_hash,
-					deposit_held: deposit_expected,
-					uploader: ALICE_ADDR
-				}),
-				topics: vec![],
-			},]
-		);
 	});
 }
 
@@ -2704,7 +2570,7 @@ fn instantiate_with_zero_balance_works() {
 			builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract();
 
 		// Ensure the contract was stored and get expected deposit amount to be reserved.
-		let deposit_expected = expected_deposit(ensure_stored(code_hash));
+		expected_deposit(ensure_stored(code_hash));
 
 		// Make sure the account exists even though no free balance was send
 		assert_eq!(<Test as Config>::Currency::free_balance(&account_id), min_balance);
@@ -2716,15 +2582,6 @@ fn instantiate_with_zero_balance_works() {
 		assert_eq!(
 			System::events(),
 			vec![
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::CodeStored {
-						code_hash,
-						deposit_held: deposit_expected,
-						uploader: ALICE_ADDR
-					}),
-					topics: vec![],
-				},
 				EventRecord {
 					phase: Phase::Initialization,
 					event: RuntimeEvent::System(frame_system::Event::NewAccount {
@@ -2749,25 +2606,6 @@ fn instantiate_with_zero_balance_works() {
 					}),
 					topics: vec![],
 				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Instantiated {
-						deployer: ALICE_ADDR,
-						contract: addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(
-						pallet_revive::Event::StorageDepositTransferredAndHeld {
-							from: ALICE_ADDR,
-							to: addr,
-							amount: test_utils::contract_info_storage_deposit(&addr),
-						}
-					),
-					topics: vec![],
-				},
 			]
 		);
 	});
@@ -2790,7 +2628,7 @@ fn instantiate_with_below_existential_deposit_works() {
 			.build_and_unwrap_contract();
 
 		// Ensure the contract was stored and get expected deposit amount to be reserved.
-		let deposit_expected = expected_deposit(ensure_stored(code_hash));
+		expected_deposit(ensure_stored(code_hash));
 		// Make sure the account exists even though not enough free balance was send
 		assert_eq!(<Test as Config>::Currency::free_balance(&account_id), min_balance + value);
 		assert_eq!(
@@ -2801,15 +2639,6 @@ fn instantiate_with_below_existential_deposit_works() {
 		assert_eq!(
 			System::events(),
 			vec![
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::CodeStored {
-						code_hash,
-						deposit_held: deposit_expected,
-						uploader: ALICE_ADDR
-					}),
-					topics: vec![],
-				},
 				EventRecord {
 					phase: Phase::Initialization,
 					event: RuntimeEvent::System(frame_system::Event::NewAccount {
@@ -2843,25 +2672,6 @@ fn instantiate_with_below_existential_deposit_works() {
 					}),
 					topics: vec![],
 				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Instantiated {
-						deployer: ALICE_ADDR,
-						contract: addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(
-						pallet_revive::Event::StorageDepositTransferredAndHeld {
-							from: ALICE_ADDR,
-							to: addr,
-							amount: test_utils::contract_info_storage_deposit(&addr),
-						}
-					),
-					topics: vec![],
-				},
 			]
 		);
 	});
@@ -2903,74 +2713,15 @@ fn storage_deposit_works() {
 
 		assert_eq!(
 			System::events(),
-			vec![
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Balances(pallet_balances::Event::Transfer {
-						from: ALICE,
-						to: account_id.clone(),
-						amount: 42,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Called {
-						caller: Origin::from_account_id(ALICE),
-						contract: addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(
-						pallet_revive::Event::StorageDepositTransferredAndHeld {
-							from: ALICE_ADDR,
-							to: addr,
-							amount: charged0,
-						}
-					),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Called {
-						caller: Origin::from_account_id(ALICE),
-						contract: addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(
-						pallet_revive::Event::StorageDepositTransferredAndHeld {
-							from: ALICE_ADDR,
-							to: addr,
-							amount: charged1,
-						}
-					),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Called {
-						caller: Origin::from_account_id(ALICE),
-						contract: addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(
-						pallet_revive::Event::StorageDepositTransferredAndReleased {
-							from: addr,
-							to: ALICE_ADDR,
-							amount: refunded0,
-						}
-					),
-					topics: vec![],
-				},
-			]
+			vec![EventRecord {
+				phase: Phase::Initialization,
+				event: RuntimeEvent::Balances(pallet_balances::Event::Transfer {
+					from: ALICE,
+					to: account_id.clone(),
+					amount: 42,
+				}),
+				topics: vec![],
+			},]
 		);
 	});
 }
@@ -3063,18 +2814,6 @@ fn set_code_extrinsic() {
 		assert_eq!(get_contract(&addr).code_hash, new_code_hash);
 		assert_refcount!(&code_hash, 0);
 		assert_refcount!(&new_code_hash, 1);
-		assert_eq!(
-			System::events(),
-			vec![EventRecord {
-				phase: Phase::Initialization,
-				event: RuntimeEvent::Contracts(pallet_revive::Event::ContractCodeUpdated {
-					contract: addr,
-					new_code_hash,
-					old_code_hash: code_hash,
-				}),
-				topics: vec![],
-			},]
-		);
 	});
 }
 
@@ -3180,7 +2919,7 @@ fn contract_reverted() {
 
 #[test]
 fn set_code_hash() {
-	let (wasm, code_hash) = compile_module("set_code_hash").unwrap();
+	let (wasm, _) = compile_module("set_code_hash").unwrap();
 	let (new_wasm, new_code_hash) = compile_module("new_set_code_hash_contract").unwrap();
 
 	ExtBuilder::default().existential_deposit(100).build().execute_with(|| {
@@ -3208,38 +2947,6 @@ fn set_code_hash() {
 		// Second calls new contract code that returns 2
 		let result = builder::bare_call(contract_addr).build_and_unwrap_result();
 		assert_return_code!(result, 2);
-
-		// Checking for the last event only
-		assert_eq!(
-			&System::events(),
-			&[
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::ContractCodeUpdated {
-						contract: contract_addr,
-						new_code_hash,
-						old_code_hash: code_hash,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Called {
-						caller: Origin::from_account_id(ALICE),
-						contract: contract_addr,
-					}),
-					topics: vec![],
-				},
-				EventRecord {
-					phase: Phase::Initialization,
-					event: RuntimeEvent::Contracts(crate::Event::Called {
-						caller: Origin::from_account_id(ALICE),
-						contract: contract_addr,
-					}),
-					topics: vec![],
-				},
-			],
-		);
 	});
 }
 
diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs
index b45d7026ba9..527cf163095 100644
--- a/substrate/frame/revive/src/wasm/mod.rs
+++ b/substrate/frame/revive/src/wasm/mod.rs
@@ -29,14 +29,13 @@ pub use crate::wasm::runtime::{ReturnData, TrapReason};
 pub use crate::wasm::runtime::{Memory, Runtime, RuntimeCosts};
 
 use crate::{
-	address::AddressMapper,
 	exec::{ExecResult, Executable, ExportedFunction, Ext},
 	gas::{GasMeter, Token},
 	limits,
 	storage::meter::Diff,
 	weights::WeightInfo,
-	AccountIdOf, BadOrigin, BalanceOf, CodeInfoOf, CodeVec, Config, Error, Event, ExecError,
-	HoldReason, Pallet, PristineCode, Weight, LOG_TARGET,
+	AccountIdOf, BadOrigin, BalanceOf, CodeInfoOf, CodeVec, Config, Error, ExecError, HoldReason,
+	PristineCode, Weight, LOG_TARGET,
 };
 use alloc::vec::Vec;
 use codec::{Decode, Encode, MaxEncodedLen};
@@ -157,16 +156,9 @@ where
 					code_info.deposit,
 					BestEffort,
 				);
-				let deposit_released = code_info.deposit;
-				let remover = T::AddressMapper::to_address(&code_info.owner);
 
 				*existing = None;
 				<PristineCode<T>>::remove(&code_hash);
-				<Pallet<T>>::deposit_event(Event::CodeRemoved {
-					code_hash,
-					deposit_released,
-					remover,
-				});
 				Ok(())
 			} else {
 				Err(<Error<T>>::CodeNotFound.into())
@@ -202,12 +194,6 @@ where
 					self.code_info.refcount = 0;
 					<PristineCode<T>>::insert(code_hash, &self.code);
 					*stored_code_info = Some(self.code_info.clone());
-					let uploader = T::AddressMapper::to_address(&self.code_info.owner);
-					<Pallet<T>>::deposit_event(Event::CodeStored {
-						code_hash,
-						deposit_held: deposit,
-						uploader,
-					});
 					Ok(deposit)
 				},
 			}
-- 
GitLab


From 412aca6c48a01f11318228f4d8a79fec544a22bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Michael=20M=C3=BCller?= <mich@elmueller.net>
Date: Wed, 15 Jan 2025 22:58:51 +0100
Subject: [PATCH 066/116] [pallet-revive] Add host function `to_account_id`
 (#7091)

Closes https://github.com/paritytech/polkadot-sdk/issues/6891.

cc @athei @xermicus @pgherveou
---
 prdoc/pr_7091.prdoc                           | 12 ++++++
 .../fixtures/contracts/to_account_id.rs       | 40 ++++++++++++++++++
 .../frame/revive/src/benchmarking/mod.rs      | 32 +++++++++++++++
 substrate/frame/revive/src/exec.rs            | 41 +++++++++++++++++++
 substrate/frame/revive/src/tests.rs           | 37 +++++++++++++++++
 substrate/frame/revive/src/wasm/runtime.rs    | 24 +++++++++++
 substrate/frame/revive/src/weights.rs         | 21 ++++++++++
 substrate/frame/revive/uapi/src/host.rs       | 12 ++++++
 .../frame/revive/uapi/src/host/riscv64.rs     |  6 +++
 9 files changed, 225 insertions(+)
 create mode 100644 prdoc/pr_7091.prdoc
 create mode 100644 substrate/frame/revive/fixtures/contracts/to_account_id.rs

diff --git a/prdoc/pr_7091.prdoc b/prdoc/pr_7091.prdoc
new file mode 100644
index 00000000000..badea4e82fd
--- /dev/null
+++ b/prdoc/pr_7091.prdoc
@@ -0,0 +1,12 @@
+title: '[pallet-revive] Add new host function `to_account_id`'
+doc:
+- audience: Runtime Dev
+  description: A new host function `to_account_id` is added. It allows retrieving
+    the account id for a `H160` address.
+crates:
+- name: pallet-revive-fixtures
+  bump: minor
+- name: pallet-revive
+  bump: minor
+- name: pallet-revive-uapi
+  bump: minor
diff --git a/substrate/frame/revive/fixtures/contracts/to_account_id.rs b/substrate/frame/revive/fixtures/contracts/to_account_id.rs
new file mode 100644
index 00000000000..c2a8fce3ec9
--- /dev/null
+++ b/substrate/frame/revive/fixtures/contracts/to_account_id.rs
@@ -0,0 +1,40 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![no_std]
+#![no_main]
+
+use common::input;
+use uapi::{HostFn, HostFnImpl as api};
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn deploy() {}
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn call() {
+    input!(
+        address: &[u8; 20],
+        expected_account_id: &[u8; 32],
+    );
+
+    let mut account_id = [0u8; 32];
+    api::to_account_id(address, &mut account_id);
+
+    assert!(&account_id == expected_account_id);
+}
diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs
index e23554f21ba..18d7bb0afc3 100644
--- a/substrate/frame/revive/src/benchmarking/mod.rs
+++ b/substrate/frame/revive/src/benchmarking/mod.rs
@@ -556,6 +556,38 @@ mod benchmarks {
 		assert_eq!(result.unwrap(), 1);
 	}
 
+	#[benchmark(pov_mode = Measured)]
+	fn seal_to_account_id() {
+		// use a mapped address for the benchmark, to ensure that we bench the worst
+		// case (and not the fallback case).
+		let address = {
+			let caller = account("seal_to_account_id", 0, 0);
+			T::Currency::set_balance(&caller, caller_funding::<T>());
+			T::AddressMapper::map(&caller).unwrap();
+			T::AddressMapper::to_address(&caller)
+		};
+
+		let len = <T::AccountId as MaxEncodedLen>::max_encoded_len();
+		build_runtime!(runtime, memory: [vec![0u8; len], address.0, ]);
+
+		let result;
+		#[block]
+		{
+			result = runtime.bench_to_account_id(memory.as_mut_slice(), len as u32, 0);
+		}
+
+		assert_ok!(result);
+		assert_ne!(
+			memory.as_slice()[20..32],
+			[0xEE; 12],
+			"fallback suffix found where none should be"
+		);
+		assert_eq!(
+			T::AccountId::decode(&mut memory.as_slice()),
+			Ok(runtime.ext().to_account_id(&address))
+		);
+	}
+
 	#[benchmark(pov_mode = Measured)]
 	fn seal_code_hash() {
 		let contract = Contract::<T>::with_index(1, WasmModule::dummy(), vec![]).unwrap();
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index 1c6ca435aef..f696f75a4a1 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -293,6 +293,9 @@ pub trait Ext: sealing::Sealed {
 	/// Check if a contract lives at the specified `address`.
 	fn is_contract(&self, address: &H160) -> bool;
 
+	/// Returns the account id for the given `address`.
+	fn to_account_id(&self, address: &H160) -> AccountIdOf<Self::T>;
+
 	/// Returns the code hash of the contract for the given `address`.
 	/// If not a contract but account exists then `keccak_256([])` is returned, otherwise `zero`.
 	fn code_hash(&self, address: &H160) -> H256;
@@ -1572,6 +1575,10 @@ where
 		ContractInfoOf::<T>::contains_key(&address)
 	}
 
+	fn to_account_id(&self, address: &H160) -> T::AccountId {
+		T::AddressMapper::to_account_id(address)
+	}
+
 	fn code_hash(&self, address: &H160) -> H256 {
 		<ContractInfoOf<T>>::get(&address)
 			.map(|contract| contract.code_hash)
@@ -2582,6 +2589,40 @@ mod tests {
 		});
 	}
 
+	#[test]
+	fn to_account_id_returns_proper_values() {
+		let bob_code_hash = MockLoader::insert(Call, |ctx, _| {
+			let alice_account_id = <Test as Config>::AddressMapper::to_account_id(&ALICE_ADDR);
+			assert_eq!(ctx.ext.to_account_id(&ALICE_ADDR), alice_account_id);
+
+			const UNMAPPED_ADDR: H160 = H160([99u8; 20]);
+			let mut unmapped_fallback_account_id = [0xEE; 32];
+			unmapped_fallback_account_id[..20].copy_from_slice(UNMAPPED_ADDR.as_bytes());
+			assert_eq!(
+				ctx.ext.to_account_id(&UNMAPPED_ADDR),
+				AccountId32::new(unmapped_fallback_account_id)
+			);
+
+			exec_success()
+		});
+
+		ExtBuilder::default().build().execute_with(|| {
+			place_contract(&BOB, bob_code_hash);
+			let origin = Origin::from_account_id(ALICE);
+			let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap();
+			let result = MockStack::run_call(
+				origin,
+				BOB_ADDR,
+				&mut GasMeter::<Test>::new(GAS_LIMIT),
+				&mut storage_meter,
+				U256::zero(),
+				vec![0],
+				false,
+			);
+			assert_matches!(result, Ok(_));
+		});
+	}
+
 	#[test]
 	fn code_hash_returns_proper_values() {
 		let bob_code_hash = MockLoader::insert(Call, |ctx, _| {
diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs
index 35940f544d0..8398bc2cb66 100644
--- a/substrate/frame/revive/src/tests.rs
+++ b/substrate/frame/revive/src/tests.rs
@@ -4239,6 +4239,43 @@ fn origin_api_works() {
 	});
 }
 
+#[test]
+fn to_account_id_works() {
+	let (code_hash_code, _) = compile_module("to_account_id").unwrap();
+
+	ExtBuilder::default().existential_deposit(1).build().execute_with(|| {
+		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
+		let _ = <Test as Config>::Currency::set_balance(&EVE, 1_000_000);
+
+		let Contract { addr, .. } =
+			builder::bare_instantiate(Code::Upload(code_hash_code)).build_and_unwrap_contract();
+
+		// mapped account
+		<Pallet<Test>>::map_account(RuntimeOrigin::signed(EVE)).unwrap();
+		let expected_mapped_account_id = &<Test as Config>::AddressMapper::to_account_id(&EVE_ADDR);
+		assert_ne!(
+			expected_mapped_account_id.encode()[20..32],
+			[0xEE; 12],
+			"fallback suffix found where none should be"
+		);
+		assert_ok!(builder::call(addr)
+			.data((EVE_ADDR, expected_mapped_account_id).encode())
+			.build());
+
+		// fallback for unmapped accounts
+		let expected_fallback_account_id =
+			&<Test as Config>::AddressMapper::to_account_id(&BOB_ADDR);
+		assert_eq!(
+			expected_fallback_account_id.encode()[20..32],
+			[0xEE; 12],
+			"no fallback suffix found where one should be"
+		);
+		assert_ok!(builder::call(addr)
+			.data((BOB_ADDR, expected_fallback_account_id).encode())
+			.build());
+	});
+}
+
 #[test]
 fn code_hash_works() {
 	let (code_hash_code, self_code_hash) = compile_module("code_hash").unwrap();
diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs
index 1ff6a80840a..4fbcfe1b47f 100644
--- a/substrate/frame/revive/src/wasm/runtime.rs
+++ b/substrate/frame/revive/src/wasm/runtime.rs
@@ -293,6 +293,8 @@ pub enum RuntimeCosts {
 	CallDataSize,
 	/// Weight of calling `seal_return_data_size`.
 	ReturnDataSize,
+	/// Weight of calling `seal_to_account_id`.
+	ToAccountId,
 	/// Weight of calling `seal_origin`.
 	Origin,
 	/// Weight of calling `seal_is_contract`.
@@ -466,6 +468,7 @@ impl<T: Config> Token<T> for RuntimeCosts {
 			Caller => T::WeightInfo::seal_caller(),
 			Origin => T::WeightInfo::seal_origin(),
 			IsContract => T::WeightInfo::seal_is_contract(),
+			ToAccountId => T::WeightInfo::seal_to_account_id(),
 			CodeHash => T::WeightInfo::seal_code_hash(),
 			CodeSize => T::WeightInfo::seal_code_size(),
 			OwnCodeHash => T::WeightInfo::seal_own_code_hash(),
@@ -2140,4 +2143,25 @@ pub mod env {
 			},
 		}
 	}
+
+	/// Retrieves the account id for a specified contract address.
+	///
+	/// See [`pallet_revive_uapi::HostFn::to_account_id`].
+	fn to_account_id(
+		&mut self,
+		memory: &mut M,
+		addr_ptr: u32,
+		out_ptr: u32,
+	) -> Result<(), TrapReason> {
+		self.charge_gas(RuntimeCosts::ToAccountId)?;
+		let address = memory.read_h160(addr_ptr)?;
+		let account_id = self.ext.to_account_id(&address);
+		Ok(self.write_fixed_sandbox_output(
+			memory,
+			out_ptr,
+			&account_id.encode(),
+			false,
+			already_charged,
+		)?)
+	}
 }
diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs
index 06495d5d21a..52153d74ca7 100644
--- a/substrate/frame/revive/src/weights.rs
+++ b/substrate/frame/revive/src/weights.rs
@@ -67,6 +67,7 @@ pub trait WeightInfo {
 	fn seal_caller() -> Weight;
 	fn seal_origin() -> Weight;
 	fn seal_is_contract() -> Weight;
+	fn seal_to_account_id() -> Weight;
 	fn seal_code_hash() -> Weight;
 	fn seal_own_code_hash() -> Weight;
 	fn seal_code_size() -> Weight;
@@ -377,6 +378,16 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		Weight::from_parts(10_336_000, 3771)
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 	}
+	/// Storage: `Revive::AddressSuffix` (r:1 w:0)
+	/// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`)
+	fn seal_to_account_id() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `212`
+		//  Estimated: `3677`
+		// Minimum execution time: 4_000_000 picoseconds.
+		Weight::from_parts(4_000_000, 3677)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+	}
 	/// Storage: `Revive::ContractInfoOf` (r:1 w:0)
 	/// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`)
 	fn seal_code_hash() -> Weight {
@@ -1263,6 +1274,16 @@ impl WeightInfo for () {
 		Weight::from_parts(10_336_000, 3771)
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 	}
+	/// Storage: `Revive::AddressSuffix` (r:1 w:0)
+	/// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`)
+	fn seal_to_account_id() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `212`
+		//  Estimated: `3677`
+		// Minimum execution time: 4_000_000 picoseconds.
+		Weight::from_parts(4_000_000, 3677)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+	}
 	/// Storage: `Revive::ContractInfoOf` (r:1 w:0)
 	/// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`)
 	fn seal_code_hash() -> Weight {
diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs
index b82393826dd..3e5cf0eb0c2 100644
--- a/substrate/frame/revive/uapi/src/host.rs
+++ b/substrate/frame/revive/uapi/src/host.rs
@@ -144,6 +144,18 @@ pub trait HostFn: private::Sealed {
 	/// - `output`: A reference to the output data buffer to write the origin's address.
 	fn origin(output: &mut [u8; 20]);
 
+	/// Retrieve the account id for a specified address.
+	///
+	/// # Parameters
+	///
+	/// - `addr`: A `H160` address.
+	/// - `output`: A reference to the output data buffer to write the account id.
+	///
+	/// # Note
+	///
+	/// If no mapping exists for `addr`, the fallback account id will be returned.
+	fn to_account_id(addr: &[u8; 20], output: &mut [u8]);
+
 	/// Retrieve the code hash for a specified contract address.
 	///
 	/// # Parameters
diff --git a/substrate/frame/revive/uapi/src/host/riscv64.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs
index 0023b8aa721..3726564e26e 100644
--- a/substrate/frame/revive/uapi/src/host/riscv64.rs
+++ b/substrate/frame/revive/uapi/src/host/riscv64.rs
@@ -69,6 +69,7 @@ mod sys {
 		pub fn caller(out_ptr: *mut u8);
 		pub fn origin(out_ptr: *mut u8);
 		pub fn is_contract(account_ptr: *const u8) -> ReturnCode;
+		pub fn to_account_id(address_ptr: *const u8, out_ptr: *mut u8);
 		pub fn code_hash(address_ptr: *const u8, out_ptr: *mut u8);
 		pub fn code_size(address_ptr: *const u8) -> u64;
 		pub fn own_code_hash(out_ptr: *mut u8);
@@ -456,6 +457,11 @@ impl HostFn for HostFnImpl {
 		unsafe { sys::ref_time_left() }
 	}
 
+	#[unstable_hostfn]
+	fn to_account_id(address: &[u8; 20], output: &mut [u8]) {
+		unsafe { sys::to_account_id(address.as_ptr(), output.as_mut_ptr()) }
+	}
+
 	#[unstable_hostfn]
 	fn block_hash(block_number_ptr: &[u8; 32], output: &mut [u8; 32]) {
 		unsafe { sys::block_hash(block_number_ptr.as_ptr(), output.as_mut_ptr()) };
-- 
GitLab


From be2404cccd9923c41e2f16bfe655f19574f1ae0e Mon Sep 17 00:00:00 2001
From: liamaharon <liam.aharon@hotmail.com>
Date: Thu, 16 Jan 2025 10:26:59 +0400
Subject: [PATCH 067/116] Implement `pallet-asset-rewards` (#3926)

Closes #3149

## Description

This PR introduces `pallet-asset-rewards`, which allows accounts to be
rewarded for freezing `fungible` tokens. The motivation for creating
this pallet is to allow incentivising LPs.

See the pallet docs for more info about the pallet.

## Runtime changes

The pallet has been added to
- `asset-hub-rococo`
- `asset-hub-westend`

The `NativeAndAssets` `fungibles` Union did not contain `PoolAssets`, so
it has been renamed `NativeAndNonPoolAssets`

A new `fungibles` Union `NativeAndAllAssets` was created to encompass
all assets and the native token.

## TODO
- [x] Emulation tests
- [x] Fill in Freeze logic (blocked
https://github.com/paritytech/polkadot-sdk/issues/3342) and re-run
benchmarks

---------

Co-authored-by: command-bot <>
Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Co-authored-by: muharem <ismailov.m.h@gmail.com>
Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>
---
 Cargo.lock                                    |   27 +
 Cargo.toml                                    |    2 +
 .../assets/asset-hub-rococo/Cargo.toml        |    1 +
 .../emulated/common/src/lib.rs                |    2 +
 .../tests/assets/asset-hub-rococo/Cargo.toml  |    1 +
 .../tests/assets/asset-hub-rococo/src/lib.rs  |    3 +-
 .../assets/asset-hub-rococo/src/tests/mod.rs  |    1 +
 .../asset-hub-rococo/src/tests/reward_pool.rs |  114 ++
 .../tests/assets/asset-hub-westend/Cargo.toml |    1 +
 .../tests/assets/asset-hub-westend/src/lib.rs |    8 +-
 .../assets/asset-hub-westend/src/tests/mod.rs |    1 +
 .../src/tests/reward_pool.rs                  |  113 ++
 .../assets/asset-hub-rococo/Cargo.toml        |    5 +
 .../assets/asset-hub-rococo/src/lib.rs        |  142 +-
 .../asset-hub-rococo/src/weights/mod.rs       |    1 +
 .../src/weights/pallet_asset_rewards.rs       |  217 +++
 .../assets/asset-hub-rococo/src/xcm_config.rs |   10 +-
 .../assets/asset-hub-westend/Cargo.toml       |    5 +
 .../assets/asset-hub-westend/src/lib.rs       |  144 +-
 .../asset-hub-westend/src/weights/mod.rs      |    1 +
 .../src/weights/pallet_asset_rewards.rs       |  217 +++
 .../asset-hub-westend/src/xcm_config.rs       |    7 +-
 .../runtimes/assets/common/src/lib.rs         |    7 +-
 polkadot/runtime/rococo/src/xcm_config.rs     |   20 +-
 polkadot/runtime/westend/src/xcm_config.rs    |    5 +-
 prdoc/pr_3926.prdoc                           |   30 +
 substrate/bin/node/runtime/src/lib.rs         |   77 +-
 substrate/frame/asset-rewards/Cargo.toml      |   71 +
 .../frame/asset-rewards/src/benchmarking.rs   |  355 ++++
 substrate/frame/asset-rewards/src/lib.rs      |  905 ++++++++++
 substrate/frame/asset-rewards/src/mock.rs     |  221 +++
 substrate/frame/asset-rewards/src/tests.rs    | 1457 +++++++++++++++++
 substrate/frame/asset-rewards/src/weights.rs  |  368 +++++
 substrate/frame/support/src/traits.rs         |    5 +-
 substrate/frame/support/src/traits/storage.rs |   12 +
 umbrella/Cargo.toml                           |   10 +-
 umbrella/src/lib.rs                           |    4 +
 37 files changed, 4517 insertions(+), 53 deletions(-)
 create mode 100644 cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reward_pool.rs
 create mode 100644 cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reward_pool.rs
 create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_rewards.rs
 create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_rewards.rs
 create mode 100644 prdoc/pr_3926.prdoc
 create mode 100644 substrate/frame/asset-rewards/Cargo.toml
 create mode 100644 substrate/frame/asset-rewards/src/benchmarking.rs
 create mode 100644 substrate/frame/asset-rewards/src/lib.rs
 create mode 100644 substrate/frame/asset-rewards/src/mock.rs
 create mode 100644 substrate/frame/asset-rewards/src/tests.rs
 create mode 100644 substrate/frame/asset-rewards/src/weights.rs

diff --git a/Cargo.lock b/Cargo.lock
index 0d71a770d38..6eba7e65109 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -910,6 +910,7 @@ dependencies = [
  "cumulus-primitives-core 0.7.0",
  "emulated-integration-tests-common",
  "frame-support 28.0.0",
+ "pallet-asset-rewards",
  "parachains-common 7.0.0",
  "rococo-emulated-chain",
  "sp-core 28.0.0",
@@ -928,6 +929,7 @@ dependencies = [
  "emulated-integration-tests-common",
  "frame-support 28.0.0",
  "pallet-asset-conversion 10.0.0",
+ "pallet-asset-rewards",
  "pallet-assets 29.1.0",
  "pallet-balances 28.0.0",
  "pallet-message-queue 31.0.0",
@@ -978,6 +980,7 @@ dependencies = [
  "pallet-asset-conversion 10.0.0",
  "pallet-asset-conversion-ops 0.1.0",
  "pallet-asset-conversion-tx-payment 10.0.0",
+ "pallet-asset-rewards",
  "pallet-assets 29.1.0",
  "pallet-assets-freezer 0.1.0",
  "pallet-aura 27.0.0",
@@ -1063,6 +1066,7 @@ dependencies = [
  "frame-support 28.0.0",
  "frame-system 28.0.0",
  "pallet-asset-conversion 10.0.0",
+ "pallet-asset-rewards",
  "pallet-asset-tx-payment 28.0.0",
  "pallet-assets 29.1.0",
  "pallet-balances 28.0.0",
@@ -1114,6 +1118,7 @@ dependencies = [
  "pallet-asset-conversion 10.0.0",
  "pallet-asset-conversion-ops 0.1.0",
  "pallet-asset-conversion-tx-payment 10.0.0",
+ "pallet-asset-rewards",
  "pallet-assets 29.1.0",
  "pallet-assets-freezer 0.1.0",
  "pallet-aura 27.0.0",
@@ -12036,6 +12041,27 @@ dependencies = [
  "sp-runtime 39.0.2",
 ]
 
+[[package]]
+name = "pallet-asset-rewards"
+version = "0.1.0"
+dependencies = [
+ "frame-benchmarking 28.0.0",
+ "frame-support 28.0.0",
+ "frame-system 28.0.0",
+ "pallet-assets 29.1.0",
+ "pallet-assets-freezer 0.1.0",
+ "pallet-balances 28.0.0",
+ "parity-scale-codec",
+ "primitive-types 0.13.1",
+ "scale-info",
+ "sp-api 26.0.0",
+ "sp-arithmetic 23.0.0",
+ "sp-core 28.0.0",
+ "sp-io 30.0.0",
+ "sp-runtime 31.0.1",
+ "sp-std 14.0.0",
+]
+
 [[package]]
 name = "pallet-asset-tx-payment"
 version = "28.0.0"
@@ -18715,6 +18741,7 @@ dependencies = [
  "pallet-asset-conversion-ops 0.1.0",
  "pallet-asset-conversion-tx-payment 10.0.0",
  "pallet-asset-rate 7.0.0",
+ "pallet-asset-rewards",
  "pallet-asset-tx-payment 28.0.0",
  "pallet-assets 29.1.0",
  "pallet-assets-freezer 0.1.0",
diff --git a/Cargo.toml b/Cargo.toml
index eb99b80e16f..509775fe99e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -315,6 +315,7 @@ members = [
 	"substrate/frame/asset-conversion",
 	"substrate/frame/asset-conversion/ops",
 	"substrate/frame/asset-rate",
+	"substrate/frame/asset-rewards",
 	"substrate/frame/assets",
 	"substrate/frame/assets-freezer",
 	"substrate/frame/atomic-swap",
@@ -893,6 +894,7 @@ pallet-asset-conversion = { path = "substrate/frame/asset-conversion", default-f
 pallet-asset-conversion-ops = { path = "substrate/frame/asset-conversion/ops", default-features = false }
 pallet-asset-conversion-tx-payment = { path = "substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false }
 pallet-asset-rate = { path = "substrate/frame/asset-rate", default-features = false }
+pallet-asset-rewards = { path = "substrate/frame/asset-rewards", default-features = false }
 pallet-asset-tx-payment = { path = "substrate/frame/transaction-payment/asset-tx-payment", default-features = false }
 pallet-assets = { path = "substrate/frame/assets", default-features = false }
 pallet-assets-freezer = { path = "substrate/frame/assets-freezer", default-features = false }
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml
index a164a8197f7..c6a8baeff3b 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml
@@ -14,6 +14,7 @@ workspace = true
 
 # Substrate
 frame-support = { workspace = true }
+pallet-asset-rewards = { workspace = true }
 sp-core = { workspace = true }
 sp-keyring = { workspace = true }
 
diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
index e2757f8b9a3..f5466a63f1f 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
@@ -58,6 +58,8 @@ pub const USDT_ID: u32 = 1984;
 
 pub const PENPAL_A_ID: u32 = 2000;
 pub const PENPAL_B_ID: u32 = 2001;
+pub const ASSET_HUB_ROCOCO_ID: u32 = 1000;
+pub const ASSET_HUB_WESTEND_ID: u32 = 1000;
 pub const ASSETS_PALLET_ID: u8 = 50;
 
 parameter_types! {
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml
index 9e8b8f2a52d..b53edb39c73 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml
@@ -17,6 +17,7 @@ codec = { workspace = true }
 # Substrate
 frame-support = { workspace = true }
 pallet-asset-conversion = { workspace = true }
+pallet-asset-rewards = { workspace = true }
 pallet-assets = { workspace = true }
 pallet-balances = { workspace = true }
 pallet-message-queue = { workspace = true }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
index f3a1b3f5bfa..513ca278a31 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
@@ -76,10 +76,11 @@ mod imports {
 			genesis::ED as ROCOCO_ED,
 			rococo_runtime::{
 				governance as rococo_governance,
+				governance::pallet_custom_origins::Origin::Treasurer,
 				xcm_config::{
 					UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig,
 				},
-				OriginCaller as RococoOriginCaller,
+				Dmp, OriginCaller as RococoOriginCaller,
 			},
 			RococoRelayPallet as RococoPallet,
 		},
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs
index 88fa379c407..75714acb07c 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs
@@ -16,6 +16,7 @@
 mod claim_assets;
 mod hybrid_transfers;
 mod reserve_transfer;
+mod reward_pool;
 mod send;
 mod set_xcm_versions;
 mod swap;
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reward_pool.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reward_pool.rs
new file mode 100644
index 00000000000..2f3ee536a7b
--- /dev/null
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reward_pool.rs
@@ -0,0 +1,114 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::imports::*;
+use codec::Encode;
+use frame_support::{assert_ok, sp_runtime::traits::Dispatchable, traits::schedule::DispatchTime};
+use xcm_executor::traits::ConvertLocation;
+
+#[test]
+fn treasury_creates_asset_reward_pool() {
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+		type Balances = <AssetHubRococo as AssetHubRococoPallet>::Balances;
+
+		let treasurer =
+			Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]);
+		let treasurer_account =
+			ahr_xcm_config::LocationToAccountId::convert_location(&treasurer).unwrap();
+
+		assert_ok!(Balances::force_set_balance(
+			<AssetHubRococo as Chain>::RuntimeOrigin::root(),
+			treasurer_account.clone().into(),
+			ASSET_HUB_ROCOCO_ED * 100_000,
+		));
+
+		let events = AssetHubRococo::events();
+		match events.iter().last() {
+			Some(RuntimeEvent::Balances(pallet_balances::Event::BalanceSet { who, .. })) =>
+				assert_eq!(*who, treasurer_account),
+			_ => panic!("Expected Balances::BalanceSet event"),
+		}
+	});
+
+	Rococo::execute_with(|| {
+		type AssetHubRococoRuntimeCall = <AssetHubRococo as Chain>::RuntimeCall;
+		type AssetHubRococoRuntime = <AssetHubRococo as Chain>::Runtime;
+		type RococoRuntimeCall = <Rococo as Chain>::RuntimeCall;
+		type RococoRuntime = <Rococo as Chain>::Runtime;
+		type RococoRuntimeEvent = <Rococo as Chain>::RuntimeEvent;
+		type RococoRuntimeOrigin = <Rococo as Chain>::RuntimeOrigin;
+
+		Dmp::make_parachain_reachable(AssetHubRococo::para_id());
+
+		let staked_asset_id = bx!(RelayLocation::get());
+		let reward_asset_id = bx!(RelayLocation::get());
+
+		let reward_rate_per_block = 1_000_000_000;
+		let lifetime = 1_000_000_000;
+		let admin = None;
+
+		let create_pool_call =
+			RococoRuntimeCall::XcmPallet(pallet_xcm::Call::<RococoRuntime>::send {
+				dest: bx!(VersionedLocation::V4(
+					xcm::v4::Junction::Parachain(AssetHubRococo::para_id().into()).into()
+				)),
+				message: bx!(VersionedXcm::V5(Xcm(vec![
+					UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+					Transact {
+						origin_kind: OriginKind::SovereignAccount,
+						fallback_max_weight: None,
+						call: AssetHubRococoRuntimeCall::AssetRewards(
+							pallet_asset_rewards::Call::<AssetHubRococoRuntime>::create_pool {
+								staked_asset_id,
+								reward_asset_id,
+								reward_rate_per_block,
+								expiry: DispatchTime::After(lifetime),
+								admin
+							}
+						)
+						.encode()
+						.into(),
+					}
+				]))),
+			});
+
+		let treasury_origin: RococoRuntimeOrigin = Treasurer.into();
+		assert_ok!(create_pool_call.dispatch(treasury_origin));
+
+		assert_expected_events!(
+			Rococo,
+			vec![
+				RococoRuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+			]
+		);
+	});
+
+	AssetHubRococo::execute_with(|| {
+		type Runtime = <AssetHubRococo as Chain>::Runtime;
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+
+		assert_eq!(1, pallet_asset_rewards::Pools::<Runtime>::iter().count());
+
+		let events = AssetHubRococo::events();
+		match events.iter().last() {
+			Some(RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed {
+				success: true,
+				..
+			})) => (),
+			_ => panic!("Expected MessageQueue::Processed event"),
+		}
+	});
+}
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml
index 5cd00c239e6..ef68a53c3b1 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml
@@ -19,6 +19,7 @@ frame-metadata-hash-extension = { workspace = true, default-features = true }
 frame-support = { workspace = true }
 frame-system = { workspace = true }
 pallet-asset-conversion = { workspace = true }
+pallet-asset-rewards = { workspace = true }
 pallet-asset-tx-payment = { workspace = true }
 pallet-assets = { workspace = true }
 pallet-balances = { workspace = true }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs
index 36630e2d222..68dc87250f7 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs
@@ -79,8 +79,12 @@ mod imports {
 		},
 		westend_emulated_chain::{
 			genesis::ED as WESTEND_ED,
-			westend_runtime::xcm_config::{
-				UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig,
+			westend_runtime::{
+				governance::pallet_custom_origins::Origin::Treasurer,
+				xcm_config::{
+					UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig,
+				},
+				Dmp,
 			},
 			WestendRelayPallet as WestendPallet,
 		},
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs
index 0dfe7a85f4c..576c44fc542 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs
@@ -17,6 +17,7 @@ mod claim_assets;
 mod fellowship_treasury;
 mod hybrid_transfers;
 mod reserve_transfer;
+mod reward_pool;
 mod send;
 mod set_asset_claimer;
 mod set_xcm_versions;
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reward_pool.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reward_pool.rs
new file mode 100644
index 00000000000..4df51abcace
--- /dev/null
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reward_pool.rs
@@ -0,0 +1,113 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::imports::*;
+use codec::Encode;
+use frame_support::{assert_ok, sp_runtime::traits::Dispatchable, traits::schedule::DispatchTime};
+use xcm_executor::traits::ConvertLocation;
+
+#[test]
+fn treasury_creates_asset_reward_pool() {
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+		type Balances = <AssetHubWestend as AssetHubWestendPallet>::Balances;
+
+		let treasurer =
+			Location::new(1, [Plurality { id: BodyId::Treasury, part: BodyPart::Voice }]);
+		let treasurer_account =
+			ahw_xcm_config::LocationToAccountId::convert_location(&treasurer).unwrap();
+
+		assert_ok!(Balances::force_set_balance(
+			<AssetHubWestend as Chain>::RuntimeOrigin::root(),
+			treasurer_account.clone().into(),
+			ASSET_HUB_WESTEND_ED * 100_000,
+		));
+
+		let events = AssetHubWestend::events();
+		match events.iter().last() {
+			Some(RuntimeEvent::Balances(pallet_balances::Event::BalanceSet { who, .. })) =>
+				assert_eq!(*who, treasurer_account),
+			_ => panic!("Expected Balances::BalanceSet event"),
+		}
+	});
+	Westend::execute_with(|| {
+		type AssetHubWestendRuntimeCall = <AssetHubWestend as Chain>::RuntimeCall;
+		type AssetHubWestendRuntime = <AssetHubWestend as Chain>::Runtime;
+		type WestendRuntimeCall = <Westend as Chain>::RuntimeCall;
+		type WestendRuntime = <Westend as Chain>::Runtime;
+		type WestendRuntimeEvent = <Westend as Chain>::RuntimeEvent;
+		type WestendRuntimeOrigin = <Westend as Chain>::RuntimeOrigin;
+
+		Dmp::make_parachain_reachable(AssetHubWestend::para_id());
+
+		let staked_asset_id = bx!(RelayLocation::get());
+		let reward_asset_id = bx!(RelayLocation::get());
+
+		let reward_rate_per_block = 1_000_000_000;
+		let lifetime = 1_000_000_000;
+		let admin = None;
+
+		let create_pool_call =
+			WestendRuntimeCall::XcmPallet(pallet_xcm::Call::<WestendRuntime>::send {
+				dest: bx!(VersionedLocation::V4(
+					xcm::v4::Junction::Parachain(AssetHubWestend::para_id().into()).into()
+				)),
+				message: bx!(VersionedXcm::V5(Xcm(vec![
+					UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+					Transact {
+						origin_kind: OriginKind::SovereignAccount,
+						fallback_max_weight: None,
+						call: AssetHubWestendRuntimeCall::AssetRewards(
+							pallet_asset_rewards::Call::<AssetHubWestendRuntime>::create_pool {
+								staked_asset_id,
+								reward_asset_id,
+								reward_rate_per_block,
+								expiry: DispatchTime::After(lifetime),
+								admin
+							}
+						)
+						.encode()
+						.into(),
+					}
+				]))),
+			});
+
+		let treasury_origin: WestendRuntimeOrigin = Treasurer.into();
+		assert_ok!(create_pool_call.dispatch(treasury_origin));
+
+		assert_expected_events!(
+			Westend,
+			vec![
+				WestendRuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+			]
+		);
+	});
+
+	AssetHubWestend::execute_with(|| {
+		type Runtime = <AssetHubWestend as Chain>::Runtime;
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		assert_eq!(1, pallet_asset_rewards::Pools::<Runtime>::iter().count());
+
+		let events = AssetHubWestend::events();
+		match events.iter().last() {
+			Some(RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed {
+				success: true,
+				..
+			})) => (),
+			_ => panic!("Expected MessageQueue::Processed event"),
+		}
+	});
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
index abe59a8439a..d612dd03c24 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml
@@ -30,6 +30,7 @@ frame-try-runtime = { optional = true, workspace = true }
 pallet-asset-conversion = { workspace = true }
 pallet-asset-conversion-ops = { workspace = true }
 pallet-asset-conversion-tx-payment = { workspace = true }
+pallet-asset-rewards = { workspace = true }
 pallet-assets = { workspace = true }
 pallet-assets-freezer = { workspace = true }
 pallet-aura = { workspace = true }
@@ -61,6 +62,7 @@ sp-storage = { workspace = true }
 sp-transaction-pool = { workspace = true }
 sp-version = { workspace = true }
 sp-weights = { workspace = true }
+
 # num-traits feature needed for dex integer sq root:
 primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true }
 
@@ -123,6 +125,7 @@ runtime-benchmarks = [
 	"pallet-asset-conversion-ops/runtime-benchmarks",
 	"pallet-asset-conversion-tx-payment/runtime-benchmarks",
 	"pallet-asset-conversion/runtime-benchmarks",
+	"pallet-asset-rewards/runtime-benchmarks",
 	"pallet-assets-freezer/runtime-benchmarks",
 	"pallet-assets/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
@@ -162,6 +165,7 @@ try-runtime = [
 	"pallet-asset-conversion-ops/try-runtime",
 	"pallet-asset-conversion-tx-payment/try-runtime",
 	"pallet-asset-conversion/try-runtime",
+	"pallet-asset-rewards/try-runtime",
 	"pallet-assets-freezer/try-runtime",
 	"pallet-assets/try-runtime",
 	"pallet-aura/try-runtime",
@@ -212,6 +216,7 @@ std = [
 	"pallet-asset-conversion-ops/std",
 	"pallet-asset-conversion-tx-payment/std",
 	"pallet-asset-conversion/std",
+	"pallet-asset-rewards/std",
 	"pallet-assets-freezer/std",
 	"pallet-assets/std",
 	"pallet-aura/std",
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index db9a8201ebb..43b7bf0ba11 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -35,7 +35,7 @@ use assets_common::{
 	foreign_creators::ForeignCreators,
 	local_and_foreign_assets::{LocalFromLeft, TargetFromLeft},
 	matching::{FromNetwork, FromSiblingParachain},
-	AssetIdForTrustBackedAssetsConvert,
+	AssetIdForPoolAssets, AssetIdForPoolAssetsConvert, AssetIdForTrustBackedAssetsConvert,
 };
 use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases;
 use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector};
@@ -61,9 +61,9 @@ use frame_support::{
 	genesis_builder_helper::{build_state, get_preset},
 	ord_parameter_types, parameter_types,
 	traits::{
-		fungible, fungibles, tokens::imbalance::ResolveAssetTo, AsEnsureOriginWithArg, ConstBool,
-		ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Equals, InstanceFilter,
-		TransformOrigin,
+		fungible, fungible::HoldConsideration, fungibles, tokens::imbalance::ResolveAssetTo,
+		AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8,
+		ConstantStoragePrice, EitherOfDiverse, Equals, InstanceFilter, TransformOrigin,
 	},
 	weights::{ConstantMultiplier, Weight, WeightToFee as _},
 	BoundedVec, PalletId,
@@ -84,8 +84,8 @@ use sp_runtime::{Perbill, RuntimeDebug};
 use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*};
 use xcm_config::{
 	ForeignAssetsConvertedConcreteId, GovernanceLocation, LocationToAccountId,
-	PoolAssetsConvertedConcreteId, TokenLocation, TrustBackedAssetsConvertedConcreteId,
-	TrustBackedAssetsPalletLocation,
+	PoolAssetsConvertedConcreteId, PoolAssetsPalletLocation, TokenLocation,
+	TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocation,
 };
 
 #[cfg(test)]
@@ -111,6 +111,9 @@ use xcm_runtime_apis::{
 	fees::Error as XcmPaymentApiError,
 };
 
+#[cfg(feature = "runtime-benchmarks")]
+use frame_support::traits::PalletInfoAccess;
+
 use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight};
 
 impl_opaque_keys! {
@@ -217,8 +220,8 @@ impl pallet_balances::Config for Runtime {
 	type ReserveIdentifier = [u8; 8];
 	type RuntimeHoldReason = RuntimeHoldReason;
 	type RuntimeFreezeReason = RuntimeFreezeReason;
-	type FreezeIdentifier = ();
-	type MaxFreezes = ConstU32<0>;
+	type FreezeIdentifier = RuntimeFreezeReason;
+	type MaxFreezes = ConstU32<50>;
 	type DoneSlashHandler = ();
 }
 
@@ -302,7 +305,7 @@ impl pallet_assets::Config<PoolAssetsInstance> for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type Balance = Balance;
 	type RemoveItemsLimit = ConstU32<1000>;
-	type AssetId = u32;
+	type AssetId = AssetIdForPoolAssets;
 	type AssetIdParameter = u32;
 	type Currency = Balances;
 	type CreateOrigin =
@@ -343,8 +346,21 @@ pub type LocalAndForeignAssets = fungibles::UnionOf<
 	AccountId,
 >;
 
-/// Union fungibles implementation for [`LocalAndForeignAssets`] and `Balances`.
-pub type NativeAndAssets = fungible::UnionOf<
+/// Union fungibles implementation for `AssetsFreezer` and `ForeignAssetsFreezer`.
+pub type LocalAndForeignAssetsFreezer = fungibles::UnionOf<
+	AssetsFreezer,
+	ForeignAssetsFreezer,
+	LocalFromLeft<
+		AssetIdForTrustBackedAssetsConvert<TrustBackedAssetsPalletLocation, xcm::v5::Location>,
+		AssetIdForTrustBackedAssets,
+		xcm::v5::Location,
+	>,
+	xcm::v5::Location,
+	AccountId,
+>;
+
+/// Union fungibles implementation for [`LocalAndForeignAssets`] and [`Balances`].
+pub type NativeAndNonPoolAssets = fungible::UnionOf<
 	Balances,
 	LocalAndForeignAssets,
 	TargetFromLeft<TokenLocation, xcm::v5::Location>,
@@ -352,6 +368,45 @@ pub type NativeAndAssets = fungible::UnionOf<
 	AccountId,
 >;
 
+/// Union fungibles implementation for [`LocalAndForeignAssetsFreezer`] and [`Balances`].
+pub type NativeAndNonPoolAssetsFreezer = fungible::UnionOf<
+	Balances,
+	LocalAndForeignAssetsFreezer,
+	TargetFromLeft<TokenLocation, xcm::v5::Location>,
+	xcm::v5::Location,
+	AccountId,
+>;
+
+/// Union fungibles implementation for [`PoolAssets`] and [`NativeAndNonPoolAssets`].
+///
+/// NOTE: Should be kept updated to include ALL balances and assets in the runtime.
+pub type NativeAndAllAssets = fungibles::UnionOf<
+	PoolAssets,
+	NativeAndNonPoolAssets,
+	LocalFromLeft<
+		AssetIdForPoolAssetsConvert<PoolAssetsPalletLocation, xcm::v5::Location>,
+		AssetIdForPoolAssets,
+		xcm::v5::Location,
+	>,
+	xcm::v5::Location,
+	AccountId,
+>;
+
+/// Union fungibles implementation for [`PoolAssetsFreezer`] and [`NativeAndNonPoolAssetsFreezer`].
+///
+/// NOTE: Should be kept updated to include ALL balances and assets in the runtime.
+pub type NativeAndAllAssetsFreezer = fungibles::UnionOf<
+	PoolAssetsFreezer,
+	NativeAndNonPoolAssetsFreezer,
+	LocalFromLeft<
+		AssetIdForPoolAssetsConvert<PoolAssetsPalletLocation, xcm::v5::Location>,
+		AssetIdForPoolAssets,
+		xcm::v5::Location,
+	>,
+	xcm::v5::Location,
+	AccountId,
+>;
+
 pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter<
 	AssetConversionPalletId,
 	(xcm::v5::Location, xcm::v5::Location),
@@ -362,7 +417,7 @@ impl pallet_asset_conversion::Config for Runtime {
 	type Balance = Balance;
 	type HigherPrecisionBalance = sp_core::U256;
 	type AssetKind = xcm::v5::Location;
-	type Assets = NativeAndAssets;
+	type Assets = NativeAndNonPoolAssets;
 	type PoolId = (Self::AssetKind, Self::AssetKind);
 	type PoolLocator = pallet_asset_conversion::WithFirstAsset<
 		TokenLocation,
@@ -823,9 +878,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime {
 	type AssetId = xcm::v5::Location;
 	type OnChargeAssetTransaction = SwapAssetAdapter<
 		TokenLocation,
-		NativeAndAssets,
+		NativeAndNonPoolAssets,
 		AssetConversion,
-		ResolveAssetTo<StakingPot, NativeAndAssets>,
+		ResolveAssetTo<StakingPot, NativeAndNonPoolAssets>,
 	>;
 	type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo<Runtime>;
 	#[cfg(feature = "runtime-benchmarks")]
@@ -953,6 +1008,55 @@ impl pallet_xcm_bridge_hub_router::Config<ToWestendXcmRouterInstance> for Runtim
 	type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId;
 }
 
+#[cfg(feature = "runtime-benchmarks")]
+pub struct PalletAssetRewardsBenchmarkHelper;
+
+#[cfg(feature = "runtime-benchmarks")]
+impl pallet_asset_rewards::benchmarking::BenchmarkHelper<xcm::v5::Location>
+	for PalletAssetRewardsBenchmarkHelper
+{
+	fn staked_asset() -> Location {
+		Location::new(
+			0,
+			[PalletInstance(<Assets as PalletInfoAccess>::index() as u8), GeneralIndex(100)],
+		)
+	}
+	fn reward_asset() -> Location {
+		Location::new(
+			0,
+			[PalletInstance(<Assets as PalletInfoAccess>::index() as u8), GeneralIndex(101)],
+		)
+	}
+}
+
+parameter_types! {
+	pub const AssetRewardsPalletId: PalletId = PalletId(*b"py/astrd");
+	pub const RewardsPoolCreationHoldReason: RuntimeHoldReason =
+		RuntimeHoldReason::AssetRewards(pallet_asset_rewards::HoldReason::PoolCreation);
+	// 1 item, 135 bytes into the storage on pool creation.
+	pub const StakePoolCreationDeposit: Balance = deposit(1, 135);
+}
+
+impl pallet_asset_rewards::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type PalletId = AssetRewardsPalletId;
+	type Balance = Balance;
+	type Assets = NativeAndAllAssets;
+	type AssetsFreezer = NativeAndAllAssetsFreezer;
+	type AssetId = xcm::v5::Location;
+	type CreatePoolOrigin = EnsureSigned<AccountId>;
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	type Consideration = HoldConsideration<
+		AccountId,
+		Balances,
+		RewardsPoolCreationHoldReason,
+		ConstantStoragePrice<StakePoolCreationDeposit, Balance>,
+	>;
+	type WeightInfo = weights::pallet_asset_rewards::WeightInfo<Runtime>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = PalletAssetRewardsBenchmarkHelper;
+}
+
 // Create the runtime by composing the FRAME pallets that were previously configured.
 construct_runtime!(
 	pub enum Runtime
@@ -998,10 +1102,13 @@ construct_runtime!(
 		NftFractionalization: pallet_nft_fractionalization = 54,
 		PoolAssets: pallet_assets::<Instance3> = 55,
 		AssetConversion: pallet_asset_conversion = 56,
+
 		AssetsFreezer: pallet_assets_freezer::<Instance1> = 57,
 		ForeignAssetsFreezer: pallet_assets_freezer::<Instance2> = 58,
 		PoolAssetsFreezer: pallet_assets_freezer::<Instance3> = 59,
 
+		AssetRewards: pallet_asset_rewards = 60,
+
 		// TODO: the pallet instance should be removed once all pools have migrated
 		// to the new account IDs.
 		AssetConversionMigration: pallet_asset_conversion_ops = 200,
@@ -1193,6 +1300,7 @@ mod benches {
 		[pallet_assets, Foreign]
 		[pallet_assets, Pool]
 		[pallet_asset_conversion, AssetConversion]
+		[pallet_asset_rewards, AssetRewards]
 		[pallet_asset_conversion_tx_payment, AssetTxPayment]
 		[pallet_balances, Balances]
 		[pallet_message_queue, MessageQueue]
@@ -1503,6 +1611,12 @@ impl_runtime_apis! {
 		}
 	}
 
+	impl pallet_asset_rewards::AssetRewards<Block, Balance> for Runtime {
+		fn pool_creation_cost() -> Balance {
+			StakePoolCreationDeposit::get()
+		}
+	}
+
 	impl cumulus_primitives_core::GetCoreSelectorApi<Block> for Runtime {
 		fn core_selector() -> (CoreSelector, ClaimQueueOffset) {
 			ParachainSystem::core_selector()
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
index ae78a56d8b3..6893766ac72 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs
@@ -24,6 +24,7 @@ pub mod frame_system_extensions;
 pub mod pallet_asset_conversion;
 pub mod pallet_asset_conversion_ops;
 pub mod pallet_asset_conversion_tx_payment;
+pub mod pallet_asset_rewards;
 pub mod pallet_assets_foreign;
 pub mod pallet_assets_local;
 pub mod pallet_assets_pool;
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_rewards.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_rewards.rs
new file mode 100644
index 00000000000..218c93c5103
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_rewards.rs
@@ -0,0 +1,217 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_asset_rewards`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_asset_rewards
+// --chain=asset-hub-rococo-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_asset_rewards`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_asset_rewards::WeightInfo for WeightInfo<T> {
+	/// Storage: `Assets::Asset` (r:2 w:0)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::NextPoolId` (r:1 w:1)
+	/// Proof: `AssetRewards::NextPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolCost` (r:0 w:1)
+	/// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::Pools` (r:0 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	fn create_pool() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `358`
+		//  Estimated: `6360`
+		// Minimum execution time: 65_882_000 picoseconds.
+		Weight::from_parts(67_073_000, 0)
+			.saturating_add(Weight::from_parts(0, 6360))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(5))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:1 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1 w:0)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn stake() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `872`
+		//  Estimated: `4809`
+		// Minimum execution time: 56_950_000 picoseconds.
+		Weight::from_parts(58_088_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:1 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1 w:0)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn unstake() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `872`
+		//  Estimated: `4809`
+		// Minimum execution time: 59_509_000 picoseconds.
+		Weight::from_parts(61_064_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:0)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn harvest_rewards() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1072`
+		//  Estimated: `6208`
+		// Minimum execution time: 80_685_000 picoseconds.
+		Weight::from_parts(83_505_000, 0)
+			.saturating_add(Weight::from_parts(0, 6208))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	fn set_pool_reward_rate_per_block() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `318`
+		//  Estimated: `4809`
+		// Minimum execution time: 17_032_000 picoseconds.
+		Weight::from_parts(17_628_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	fn set_pool_admin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `318`
+		//  Estimated: `4809`
+		// Minimum execution time: 15_290_000 picoseconds.
+		Weight::from_parts(16_212_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	fn set_pool_expiry_block() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `318`
+		//  Estimated: `4809`
+		// Minimum execution time: 17_721_000 picoseconds.
+		Weight::from_parts(18_603_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:0)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn deposit_reward_tokens() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `747`
+		//  Estimated: `6208`
+		// Minimum execution time: 67_754_000 picoseconds.
+		Weight::from_parts(69_428_000, 0)
+			.saturating_add(Weight::from_parts(0, 6208))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:0)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:2 w:2)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolCost` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:0 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`)
+	fn cleanup_pool() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1105`
+		//  Estimated: `6208`
+		// Minimum execution time: 127_524_000 picoseconds.
+		Weight::from_parts(130_238_000, 0)
+			.saturating_add(Weight::from_parts(0, 6208))
+			.saturating_add(T::DbWeight::get().reads(10))
+			.saturating_add(T::DbWeight::get().writes(10))
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
index 08b2f520c4b..0c6ff5e4bfd 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
@@ -76,6 +76,10 @@ parameter_types! {
 	pub TrustBackedAssetsPalletLocation: Location =
 		PalletInstance(TrustBackedAssetsPalletIndex::get()).into();
 	pub TrustBackedAssetsPalletIndex: u8 = <Assets as PalletInfoAccess>::index() as u8;
+	pub TrustBackedAssetsPalletLocationV3: xcm::v3::Location =
+		xcm::v3::Junction::PalletInstance(<Assets as PalletInfoAccess>::index() as u8).into();
+	pub PoolAssetsPalletLocationV3: xcm::v3::Location =
+		xcm::v3::Junction::PalletInstance(<PoolAssets as PalletInfoAccess>::index() as u8).into();
 	pub ForeignAssetsPalletLocation: Location =
 		PalletInstance(<ForeignAssets as PalletInfoAccess>::index() as u8).into();
 	pub PoolAssetsPalletLocation: Location =
@@ -336,7 +340,7 @@ pub type TrustedTeleporters = (
 /// asset and the asset required for fee payment.
 pub type PoolAssetsExchanger = SingleAssetExchangeAdapter<
 	crate::AssetConversion,
-	crate::NativeAndAssets,
+	crate::NativeAndNonPoolAssets,
 	(
 		TrustBackedAssetsAsLocation<TrustBackedAssetsPalletLocation, Balance, xcm::v5::Location>,
 		ForeignAssetsConvertedConcreteId,
@@ -387,7 +391,7 @@ impl xcm_executor::Config for XcmConfig {
 			TokenLocation,
 			crate::AssetConversion,
 			WeightToFee,
-			crate::NativeAndAssets,
+			crate::NativeAndNonPoolAssets,
 			(
 				TrustBackedAssetsAsLocation<
 					TrustBackedAssetsPalletLocation,
@@ -396,7 +400,7 @@ impl xcm_executor::Config for XcmConfig {
 				>,
 				ForeignAssetsConvertedConcreteId,
 			),
-			ResolveAssetTo<StakingPot, crate::NativeAndAssets>,
+			ResolveAssetTo<StakingPot, crate::NativeAndNonPoolAssets>,
 			AccountId,
 		>,
 		// This trader allows to pay with `is_sufficient=true` "Trust Backed" assets from dedicated
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
index cb10ae9a480..65ef63a7fb3 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml
@@ -30,6 +30,7 @@ frame-try-runtime = { optional = true, workspace = true }
 pallet-asset-conversion = { workspace = true }
 pallet-asset-conversion-ops = { workspace = true }
 pallet-asset-conversion-tx-payment = { workspace = true }
+pallet-asset-rewards = { workspace = true }
 pallet-assets = { workspace = true }
 pallet-assets-freezer = { workspace = true }
 pallet-aura = { workspace = true }
@@ -62,6 +63,7 @@ sp-std = { workspace = true }
 sp-storage = { workspace = true }
 sp-transaction-pool = { workspace = true }
 sp-version = { workspace = true }
+
 # num-traits feature needed for dex integer sq root:
 primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true }
 
@@ -125,6 +127,7 @@ runtime-benchmarks = [
 	"pallet-asset-conversion-ops/runtime-benchmarks",
 	"pallet-asset-conversion-tx-payment/runtime-benchmarks",
 	"pallet-asset-conversion/runtime-benchmarks",
+	"pallet-asset-rewards/runtime-benchmarks",
 	"pallet-assets-freezer/runtime-benchmarks",
 	"pallet-assets/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
@@ -166,6 +169,7 @@ try-runtime = [
 	"pallet-asset-conversion-ops/try-runtime",
 	"pallet-asset-conversion-tx-payment/try-runtime",
 	"pallet-asset-conversion/try-runtime",
+	"pallet-asset-rewards/try-runtime",
 	"pallet-assets-freezer/try-runtime",
 	"pallet-assets/try-runtime",
 	"pallet-aura/try-runtime",
@@ -218,6 +222,7 @@ std = [
 	"pallet-asset-conversion-ops/std",
 	"pallet-asset-conversion-tx-payment/std",
 	"pallet-asset-conversion/std",
+	"pallet-asset-rewards/std",
 	"pallet-assets-freezer/std",
 	"pallet-assets/std",
 	"pallet-aura/std",
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 5966dd01f18..3ef5e87f24c 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -33,7 +33,7 @@ extern crate alloc;
 use alloc::{vec, vec::Vec};
 use assets_common::{
 	local_and_foreign_assets::{LocalFromLeft, TargetFromLeft},
-	AssetIdForTrustBackedAssetsConvert,
+	AssetIdForPoolAssets, AssetIdForPoolAssetsConvert, AssetIdForTrustBackedAssetsConvert,
 };
 use codec::{Decode, Encode, MaxEncodedLen};
 use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases;
@@ -44,10 +44,12 @@ use frame_support::{
 	genesis_builder_helper::{build_state, get_preset},
 	ord_parameter_types, parameter_types,
 	traits::{
-		fungible, fungibles,
+		fungible,
+		fungible::HoldConsideration,
+		fungibles,
 		tokens::{imbalance::ResolveAssetTo, nonfungibles_v2::Inspect},
-		AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, Equals,
-		InstanceFilter, Nothing, TransformOrigin,
+		AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8,
+		ConstantStoragePrice, Equals, InstanceFilter, Nothing, TransformOrigin,
 	},
 	weights::{ConstantMultiplier, Weight, WeightToFee as _},
 	BoundedVec, PalletId,
@@ -81,8 +83,8 @@ use testnet_parachains_constants::westend::{
 };
 use xcm_config::{
 	ForeignAssetsConvertedConcreteId, LocationToAccountId, PoolAssetsConvertedConcreteId,
-	TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocation, WestendLocation,
-	XcmOriginToTransactDispatchOrigin,
+	PoolAssetsPalletLocation, TrustBackedAssetsConvertedConcreteId,
+	TrustBackedAssetsPalletLocation, WestendLocation, XcmOriginToTransactDispatchOrigin,
 };
 
 #[cfg(any(feature = "std", test))]
@@ -93,11 +95,15 @@ use assets_common::{
 	matching::{FromNetwork, FromSiblingParachain},
 };
 use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate};
+use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight};
 use xcm::{
 	latest::prelude::AssetId,
 	prelude::{VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm},
 };
 
+#[cfg(feature = "runtime-benchmarks")]
+use frame_support::traits::PalletInfoAccess;
+
 #[cfg(feature = "runtime-benchmarks")]
 use xcm::latest::prelude::{
 	Asset, Assets as XcmAssets, Fungible, Here, InteriorLocation, Junction, Junction::*, Location,
@@ -109,8 +115,6 @@ use xcm_runtime_apis::{
 	fees::Error as XcmPaymentApiError,
 };
 
-use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight};
-
 impl_opaque_keys! {
 	pub struct SessionKeys {
 		pub aura: Aura,
@@ -218,8 +222,8 @@ impl pallet_balances::Config for Runtime {
 	type ReserveIdentifier = [u8; 8];
 	type RuntimeHoldReason = RuntimeHoldReason;
 	type RuntimeFreezeReason = RuntimeFreezeReason;
-	type FreezeIdentifier = ();
-	type MaxFreezes = ConstU32<0>;
+	type FreezeIdentifier = RuntimeFreezeReason;
+	type MaxFreezes = ConstU32<50>;
 	type DoneSlashHandler = ();
 }
 
@@ -341,8 +345,22 @@ pub type LocalAndForeignAssets = fungibles::UnionOf<
 	xcm::v5::Location,
 	AccountId,
 >;
+
+/// Union fungibles implementation for `AssetsFreezer` and `ForeignAssetsFreezer`.
+pub type LocalAndForeignAssetsFreezer = fungibles::UnionOf<
+	AssetsFreezer,
+	ForeignAssetsFreezer,
+	LocalFromLeft<
+		AssetIdForTrustBackedAssetsConvert<TrustBackedAssetsPalletLocation, xcm::v5::Location>,
+		AssetIdForTrustBackedAssets,
+		xcm::v5::Location,
+	>,
+	xcm::v5::Location,
+	AccountId,
+>;
+
 /// Union fungibles implementation for [`LocalAndForeignAssets`] and `Balances`.
-pub type NativeAndAssets = fungible::UnionOf<
+pub type NativeAndNonPoolAssets = fungible::UnionOf<
 	Balances,
 	LocalAndForeignAssets,
 	TargetFromLeft<WestendLocation, xcm::v5::Location>,
@@ -350,6 +368,45 @@ pub type NativeAndAssets = fungible::UnionOf<
 	AccountId,
 >;
 
+/// Union fungibles implementation for [`LocalAndForeignAssetsFreezer`] and [`Balances`].
+pub type NativeAndNonPoolAssetsFreezer = fungible::UnionOf<
+	Balances,
+	LocalAndForeignAssetsFreezer,
+	TargetFromLeft<WestendLocation, xcm::v5::Location>,
+	xcm::v5::Location,
+	AccountId,
+>;
+
+/// Union fungibles implementation for [`PoolAssets`] and [`NativeAndNonPoolAssets`].
+///
+/// NOTE: Should be kept updated to include ALL balances and assets in the runtime.
+pub type NativeAndAllAssets = fungibles::UnionOf<
+	PoolAssets,
+	NativeAndNonPoolAssets,
+	LocalFromLeft<
+		AssetIdForPoolAssetsConvert<PoolAssetsPalletLocation, xcm::v5::Location>,
+		AssetIdForPoolAssets,
+		xcm::v5::Location,
+	>,
+	xcm::v5::Location,
+	AccountId,
+>;
+
+/// Union fungibles implementation for [`PoolAssetsFreezer`] and [`NativeAndNonPoolAssetsFreezer`].
+///
+/// NOTE: Should be kept updated to include ALL balances and assets in the runtime.
+pub type NativeAndAllAssetsFreezer = fungibles::UnionOf<
+	PoolAssetsFreezer,
+	NativeAndNonPoolAssetsFreezer,
+	LocalFromLeft<
+		AssetIdForPoolAssetsConvert<PoolAssetsPalletLocation, xcm::v5::Location>,
+		AssetIdForPoolAssets,
+		xcm::v5::Location,
+	>,
+	xcm::v5::Location,
+	AccountId,
+>;
+
 pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter<
 	AssetConversionPalletId,
 	(xcm::v5::Location, xcm::v5::Location),
@@ -360,7 +417,7 @@ impl pallet_asset_conversion::Config for Runtime {
 	type Balance = Balance;
 	type HigherPrecisionBalance = sp_core::U256;
 	type AssetKind = xcm::v5::Location;
-	type Assets = NativeAndAssets;
+	type Assets = NativeAndNonPoolAssets;
 	type PoolId = (Self::AssetKind, Self::AssetKind);
 	type PoolLocator = pallet_asset_conversion::WithFirstAsset<
 		WestendLocation,
@@ -388,6 +445,55 @@ impl pallet_asset_conversion::Config for Runtime {
 	>;
 }
 
+#[cfg(feature = "runtime-benchmarks")]
+pub struct PalletAssetRewardsBenchmarkHelper;
+
+#[cfg(feature = "runtime-benchmarks")]
+impl pallet_asset_rewards::benchmarking::BenchmarkHelper<xcm::v5::Location>
+	for PalletAssetRewardsBenchmarkHelper
+{
+	fn staked_asset() -> Location {
+		Location::new(
+			0,
+			[PalletInstance(<Assets as PalletInfoAccess>::index() as u8), GeneralIndex(100)],
+		)
+	}
+	fn reward_asset() -> Location {
+		Location::new(
+			0,
+			[PalletInstance(<Assets as PalletInfoAccess>::index() as u8), GeneralIndex(101)],
+		)
+	}
+}
+
+parameter_types! {
+	pub const AssetRewardsPalletId: PalletId = PalletId(*b"py/astrd");
+	pub const RewardsPoolCreationHoldReason: RuntimeHoldReason =
+		RuntimeHoldReason::AssetRewards(pallet_asset_rewards::HoldReason::PoolCreation);
+	// 1 item, 135 bytes into the storage on pool creation.
+	pub const StakePoolCreationDeposit: Balance = deposit(1, 135);
+}
+
+impl pallet_asset_rewards::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type PalletId = AssetRewardsPalletId;
+	type Balance = Balance;
+	type Assets = NativeAndAllAssets;
+	type AssetsFreezer = NativeAndAllAssetsFreezer;
+	type AssetId = xcm::v5::Location;
+	type CreatePoolOrigin = EnsureSigned<AccountId>;
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	type Consideration = HoldConsideration<
+		AccountId,
+		Balances,
+		RewardsPoolCreationHoldReason,
+		ConstantStoragePrice<StakePoolCreationDeposit, Balance>,
+	>;
+	type WeightInfo = weights::pallet_asset_rewards::WeightInfo<Runtime>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = PalletAssetRewardsBenchmarkHelper;
+}
+
 impl pallet_asset_conversion_ops::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed<
@@ -816,9 +922,9 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime {
 	type AssetId = xcm::v5::Location;
 	type OnChargeAssetTransaction = SwapAssetAdapter<
 		WestendLocation,
-		NativeAndAssets,
+		NativeAndNonPoolAssets,
 		AssetConversion,
-		ResolveAssetTo<StakingPot, NativeAndAssets>,
+		ResolveAssetTo<StakingPot, NativeAndNonPoolAssets>,
 	>;
 	type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo<Runtime>;
 	#[cfg(feature = "runtime-benchmarks")]
@@ -1035,11 +1141,14 @@ construct_runtime!(
 		NftFractionalization: pallet_nft_fractionalization = 54,
 		PoolAssets: pallet_assets::<Instance3> = 55,
 		AssetConversion: pallet_asset_conversion = 56,
+
 		AssetsFreezer: pallet_assets_freezer::<Instance1> = 57,
 		ForeignAssetsFreezer: pallet_assets_freezer::<Instance2> = 58,
 		PoolAssetsFreezer: pallet_assets_freezer::<Instance3> = 59,
 		Revive: pallet_revive = 60,
 
+		AssetRewards: pallet_asset_rewards = 61,
+
 		StateTrieMigration: pallet_state_trie_migration = 70,
 
 		// TODO: the pallet instance should be removed once all pools have migrated
@@ -1317,6 +1426,7 @@ mod benches {
 		[pallet_assets, Foreign]
 		[pallet_assets, Pool]
 		[pallet_asset_conversion, AssetConversion]
+		[pallet_asset_rewards, AssetRewards]
 		[pallet_asset_conversion_tx_payment, AssetTxPayment]
 		[pallet_balances, Balances]
 		[pallet_message_queue, MessageQueue]
@@ -1674,6 +1784,12 @@ impl_runtime_apis! {
 		}
 	}
 
+	impl pallet_asset_rewards::AssetRewards<Block, Balance> for Runtime {
+		fn pool_creation_cost() -> Balance {
+			StakePoolCreationDeposit::get()
+		}
+	}
+
 	impl cumulus_primitives_core::GetCoreSelectorApi<Block> for Runtime {
 		fn core_selector() -> (CoreSelector, ClaimQueueOffset) {
 			ParachainSystem::core_selector()
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
index 442b58635f4..d653838ad80 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs
@@ -23,6 +23,7 @@ pub mod frame_system_extensions;
 pub mod pallet_asset_conversion;
 pub mod pallet_asset_conversion_ops;
 pub mod pallet_asset_conversion_tx_payment;
+pub mod pallet_asset_rewards;
 pub mod pallet_assets_foreign;
 pub mod pallet_assets_local;
 pub mod pallet_assets_pool;
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_rewards.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_rewards.rs
new file mode 100644
index 00000000000..3bbc289fec7
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_rewards.rs
@@ -0,0 +1,217 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_asset_rewards`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot-parachain
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_asset_rewards
+// --chain=asset-hub-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_asset_rewards`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_asset_rewards::WeightInfo for WeightInfo<T> {
+	/// Storage: `Assets::Asset` (r:2 w:0)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::NextPoolId` (r:1 w:1)
+	/// Proof: `AssetRewards::NextPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolCost` (r:0 w:1)
+	/// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::Pools` (r:0 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	fn create_pool() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `392`
+		//  Estimated: `6360`
+		// Minimum execution time: 60_734_000 picoseconds.
+		Weight::from_parts(61_828_000, 0)
+			.saturating_add(Weight::from_parts(0, 6360))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(5))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:1 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1 w:0)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn stake() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `906`
+		//  Estimated: `4809`
+		// Minimum execution time: 56_014_000 picoseconds.
+		Weight::from_parts(58_487_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:1 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1 w:0)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn unstake() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `906`
+		//  Estimated: `4809`
+		// Minimum execution time: 59_071_000 picoseconds.
+		Weight::from_parts(60_631_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:0)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn harvest_rewards() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1106`
+		//  Estimated: `6208`
+		// Minimum execution time: 80_585_000 picoseconds.
+		Weight::from_parts(82_186_000, 0)
+			.saturating_add(Weight::from_parts(0, 6208))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	fn set_pool_reward_rate_per_block() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `318`
+		//  Estimated: `4809`
+		// Minimum execution time: 17_083_000 picoseconds.
+		Weight::from_parts(17_816_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	fn set_pool_admin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `318`
+		//  Estimated: `4809`
+		// Minimum execution time: 15_269_000 picoseconds.
+		Weight::from_parts(15_881_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	fn set_pool_expiry_block() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `318`
+		//  Estimated: `4809`
+		// Minimum execution time: 17_482_000 picoseconds.
+		Weight::from_parts(18_124_000, 0)
+			.saturating_add(Weight::from_parts(0, 4809))
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:0)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn deposit_reward_tokens() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `781`
+		//  Estimated: `6208`
+		// Minimum execution time: 66_644_000 picoseconds.
+		Weight::from_parts(67_950_000, 0)
+			.saturating_add(Weight::from_parts(0, 6208))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(4))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:0)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:2 w:2)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolCost` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:0 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`)
+	fn cleanup_pool() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1139`
+		//  Estimated: `6208`
+		// Minimum execution time: 124_136_000 picoseconds.
+		Weight::from_parts(128_642_000, 0)
+			.saturating_add(Weight::from_parts(0, 6208))
+			.saturating_add(T::DbWeight::get().reads(10))
+			.saturating_add(T::DbWeight::get().writes(10))
+	}
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
index b4e938f1f8b..1ea2ce5136a 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
@@ -65,6 +65,7 @@ use xcm_executor::XcmExecutor;
 parameter_types! {
 	pub const RootLocation: Location = Location::here();
 	pub const WestendLocation: Location = Location::parent();
+	pub const GovernanceLocation: Location = Location::parent();
 	pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH));
 	pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into();
 	pub UniversalLocation: InteriorLocation =
@@ -359,7 +360,7 @@ pub type TrustedTeleporters = (
 /// asset and the asset required for fee payment.
 pub type PoolAssetsExchanger = SingleAssetExchangeAdapter<
 	crate::AssetConversion,
-	crate::NativeAndAssets,
+	crate::NativeAndNonPoolAssets,
 	(
 		TrustBackedAssetsAsLocation<TrustBackedAssetsPalletLocation, Balance, xcm::v5::Location>,
 		ForeignAssetsConvertedConcreteId,
@@ -409,7 +410,7 @@ impl xcm_executor::Config for XcmConfig {
 			WestendLocation,
 			crate::AssetConversion,
 			WeightToFee,
-			crate::NativeAndAssets,
+			crate::NativeAndNonPoolAssets,
 			(
 				TrustBackedAssetsAsLocation<
 					TrustBackedAssetsPalletLocation,
@@ -418,7 +419,7 @@ impl xcm_executor::Config for XcmConfig {
 				>,
 				ForeignAssetsConvertedConcreteId,
 			),
-			ResolveAssetTo<StakingPot, crate::NativeAndAssets>,
+			ResolveAssetTo<StakingPot, crate::NativeAndNonPoolAssets>,
 			AccountId,
 		>,
 		// This trader allows to pay with `is_sufficient=true` "Trust Backed" assets from dedicated
diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs
index 25c2df6b68d..50b1b63146b 100644
--- a/cumulus/parachains/runtimes/assets/common/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs
@@ -123,10 +123,11 @@ pub type ForeignAssetsConvertedConcreteId<
 	BalanceConverter,
 >;
 
-type AssetIdForPoolAssets = u32;
+pub type AssetIdForPoolAssets = u32;
+
 /// `Location` vs `AssetIdForPoolAssets` converter for `PoolAssets`.
-pub type AssetIdForPoolAssetsConvert<PoolAssetsPalletLocation> =
-	AsPrefixedGeneralIndex<PoolAssetsPalletLocation, AssetIdForPoolAssets, TryConvertInto>;
+pub type AssetIdForPoolAssetsConvert<PoolAssetsPalletLocation, L = Location> =
+	AsPrefixedGeneralIndex<PoolAssetsPalletLocation, AssetIdForPoolAssets, TryConvertInto, L>;
 /// [`MatchedConvertedConcreteId`] converter dedicated for `PoolAssets`
 pub type PoolAssetsConvertedConcreteId<PoolAssetsPalletLocation, Balance> =
 	MatchedConvertedConcreteId<
diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs
index bb77ec0000e..10c3f6c0cbf 100644
--- a/polkadot/runtime/rococo/src/xcm_config.rs
+++ b/polkadot/runtime/rococo/src/xcm_config.rs
@@ -18,7 +18,8 @@
 
 use super::{
 	parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, Fellows, ParaId, Runtime,
-	RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, Treasury, WeightToFee, XcmPallet,
+	RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, Treasurer, Treasury, WeightToFee,
+	XcmPallet,
 };
 
 use crate::governance::StakingAdmin;
@@ -228,11 +229,14 @@ impl xcm_executor::Config for XcmConfig {
 }
 
 parameter_types! {
+	/// Collective pluralistic body.
 	pub const CollectiveBodyId: BodyId = BodyId::Unit;
-	// StakingAdmin pluralistic body.
+	/// StakingAdmin pluralistic body.
 	pub const StakingAdminBodyId: BodyId = BodyId::Defense;
-	// Fellows pluralistic body.
+	/// Fellows pluralistic body.
 	pub const FellowsBodyId: BodyId = BodyId::Technical;
+	/// Treasury pluralistic body.
+	pub const TreasuryBodyId: BodyId = BodyId::Treasury;
 }
 
 /// Type to convert an `Origin` type value into a `Location` value which represents an interior
@@ -249,6 +253,9 @@ pub type StakingAdminToPlurality =
 /// Type to convert the Fellows origin to a Plurality `Location` value.
 pub type FellowsToPlurality = OriginToPluralityVoice<RuntimeOrigin, Fellows, FellowsBodyId>;
 
+/// Type to convert the Treasury origin to a Plurality `Location` value.
+pub type TreasurerToPlurality = OriginToPluralityVoice<RuntimeOrigin, Treasurer, TreasuryBodyId>;
+
 /// Type to convert a pallet `Origin` type value into a `Location` value which represents an
 /// interior location of this chain for a destination chain.
 pub type LocalPalletOriginToLocation = (
@@ -256,13 +263,18 @@ pub type LocalPalletOriginToLocation = (
 	StakingAdminToPlurality,
 	// Fellows origin to be used in XCM as a corresponding Plurality `Location` value.
 	FellowsToPlurality,
+	// Treasurer origin to be used in XCM as a corresponding Plurality `Location` value.
+	TreasurerToPlurality,
 );
 
 impl pallet_xcm::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	// Note that this configuration of `SendXcmOrigin` is different from the one present in
 	// production.
-	type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
+	type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<
+		RuntimeOrigin,
+		(LocalPalletOriginToLocation, LocalOriginToLocation),
+	>;
 	type XcmRouter = XcmRouter;
 	// Anyone can execute XCM messages locally.
 	type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs
index 3f6a7304c8a..4235edf82b2 100644
--- a/polkadot/runtime/westend/src/xcm_config.rs
+++ b/polkadot/runtime/westend/src/xcm_config.rs
@@ -280,7 +280,10 @@ impl pallet_xcm::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	// Note that this configuration of `SendXcmOrigin` is different from the one present in
 	// production.
-	type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
+	type SendXcmOrigin = xcm_builder::EnsureXcmOrigin<
+		RuntimeOrigin,
+		(LocalPalletOriginToLocation, LocalOriginToLocation),
+	>;
 	type XcmRouter = XcmRouter;
 	// Anyone can execute XCM messages locally.
 	type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
diff --git a/prdoc/pr_3926.prdoc b/prdoc/pr_3926.prdoc
new file mode 100644
index 00000000000..7f352f7a45f
--- /dev/null
+++ b/prdoc/pr_3926.prdoc
@@ -0,0 +1,30 @@
+title: Introduce pallet-asset-rewards
+
+doc:
+  - audience: Runtime Dev
+    description: |
+        Introduce pallet-asset-rewards, which allows accounts to be rewarded for freezing fungible
+        tokens. The motivation for creating this pallet is to allow incentivising liquidity providers (LPs).
+        See the pallet documentation for more details.
+
+crates:
+  - name: pallet-asset-rewards
+    bump: major
+  - name: polkadot-sdk
+    bump: minor
+  - name: kitchensink-runtime
+    bump: major
+  - name: asset-hub-rococo-runtime
+    bump: major
+  - name: asset-hub-westend-runtime
+    bump: major
+  - name: assets-common
+    bump: minor
+  - name: rococo-runtime
+    bump: minor
+  - name: westend-runtime
+    bump: patch
+  - name: frame-support
+    bump: minor
+  - name: emulated-integration-tests-common
+    bump: minor
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 97728f12f5f..de377a55bc8 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -56,10 +56,10 @@ use frame_support::{
 			imbalance::ResolveAssetTo, nonfungibles_v2::Inspect, pay::PayAssetFromAccount,
 			GetSalary, PayFromAccount,
 		},
-		AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, Contains,
-		Currency, EitherOfDiverse, EnsureOriginWithArg, EqualPrivilegeOnly, Imbalance, InsideBoth,
-		InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, LockIdentifier, Nothing,
-		OnUnbalanced, VariantCountOf, WithdrawReasons,
+		AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, ConstU64,
+		ConstantStoragePrice, Contains, Currency, EitherOfDiverse, EnsureOriginWithArg,
+		EqualPrivilegeOnly, Imbalance, InsideBoth, InstanceFilter, KeyOwnerProofSystem,
+		LinearStoragePrice, LockIdentifier, Nothing, OnUnbalanced, VariantCountOf, WithdrawReasons,
 	},
 	weights::{
 		constants::{
@@ -511,7 +511,8 @@ impl pallet_glutton::Config for Runtime {
 }
 
 parameter_types! {
-	pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage);
+	pub const PreimageHoldReason: RuntimeHoldReason =
+		RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage);
 }
 
 impl pallet_preimage::Config for Runtime {
@@ -618,6 +619,12 @@ impl pallet_transaction_payment::Config for Runtime {
 	type WeightInfo = pallet_transaction_payment::weights::SubstrateWeight<Runtime>;
 }
 
+pub type AssetsFreezerInstance = pallet_assets_freezer::Instance1;
+impl pallet_assets_freezer::Config<AssetsFreezerInstance> for Runtime {
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	type RuntimeEvent = RuntimeEvent;
+}
+
 impl pallet_asset_conversion_tx_payment::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type AssetId = NativeOrWithId<u32>;
@@ -1858,6 +1865,53 @@ impl pallet_asset_conversion::Config for Runtime {
 	type BenchmarkHelper = ();
 }
 
+pub type NativeAndAssetsFreezer =
+	UnionOf<Balances, AssetsFreezer, NativeFromLeft, NativeOrWithId<u32>, AccountId>;
+
+/// Benchmark Helper
+#[cfg(feature = "runtime-benchmarks")]
+pub struct AssetRewardsBenchmarkHelper;
+
+#[cfg(feature = "runtime-benchmarks")]
+impl pallet_asset_rewards::benchmarking::BenchmarkHelper<NativeOrWithId<u32>>
+	for AssetRewardsBenchmarkHelper
+{
+	fn staked_asset() -> NativeOrWithId<u32> {
+		NativeOrWithId::<u32>::WithId(100)
+	}
+	fn reward_asset() -> NativeOrWithId<u32> {
+		NativeOrWithId::<u32>::WithId(101)
+	}
+}
+
+parameter_types! {
+	pub const StakingRewardsPalletId: PalletId = PalletId(*b"py/stkrd");
+	pub const CreationHoldReason: RuntimeHoldReason =
+		RuntimeHoldReason::AssetRewards(pallet_asset_rewards::HoldReason::PoolCreation);
+	// 1 item, 135 bytes into the storage on pool creation.
+	pub const StakePoolCreationDeposit: Balance = deposit(1, 135);
+}
+
+impl pallet_asset_rewards::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	type AssetId = NativeOrWithId<u32>;
+	type Balance = Balance;
+	type Assets = NativeAndAssets;
+	type PalletId = StakingRewardsPalletId;
+	type CreatePoolOrigin = EnsureSigned<AccountId>;
+	type WeightInfo = ();
+	type AssetsFreezer = NativeAndAssetsFreezer;
+	type Consideration = HoldConsideration<
+		AccountId,
+		Balances,
+		CreationHoldReason,
+		ConstantStoragePrice<StakePoolCreationDeposit, Balance>,
+	>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = AssetRewardsBenchmarkHelper;
+}
+
 impl pallet_asset_conversion_ops::Config for Runtime {
 	type RuntimeEvent = RuntimeEvent;
 	type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed<(
@@ -2636,6 +2690,12 @@ mod runtime {
 
 	#[runtime::pallet_index(81)]
 	pub type VerifySignature = pallet_verify_signature::Pallet<Runtime>;
+
+	#[runtime::pallet_index(83)]
+	pub type AssetRewards = pallet_asset_rewards::Pallet<Runtime>;
+
+	#[runtime::pallet_index(84)]
+	pub type AssetsFreezer = pallet_assets_freezer::Pallet<Runtime, Instance1>;
 }
 
 impl TryFrom<RuntimeCall> for pallet_revive::Call<Runtime> {
@@ -2846,6 +2906,7 @@ mod benches {
 		[pallet_example_tasks, TasksExample]
 		[pallet_democracy, Democracy]
 		[pallet_asset_conversion, AssetConversion]
+		[pallet_asset_rewards, AssetRewards]
 		[pallet_asset_conversion_tx_payment, AssetConversionTxPayment]
 		[pallet_transaction_payment, TransactionPayment]
 		[pallet_election_provider_multi_phase, ElectionProviderMultiPhase]
@@ -3553,6 +3614,12 @@ impl_runtime_apis! {
 		}
 	}
 
+	impl pallet_asset_rewards::AssetRewards<Block, Balance> for Runtime {
+		fn pool_creation_cost() -> Balance {
+			StakePoolCreationDeposit::get()
+		}
+	}
+
 	#[cfg(feature = "try-runtime")]
 	impl frame_try_runtime::TryRuntime<Block> for Runtime {
 		fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) {
diff --git a/substrate/frame/asset-rewards/Cargo.toml b/substrate/frame/asset-rewards/Cargo.toml
new file mode 100644
index 00000000000..a03fa17cf0d
--- /dev/null
+++ b/substrate/frame/asset-rewards/Cargo.toml
@@ -0,0 +1,71 @@
+[package]
+name = "pallet-asset-rewards"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
+description = "FRAME asset rewards pallet"
+
+[lints]
+workspace = true
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+codec = { workspace = true }
+frame-benchmarking = { workspace = true, optional = true }
+frame-support = { workspace = true, features = ["experimental"] }
+frame-system = { workspace = true }
+scale-info = { workspace = true, features = ["derive"] }
+sp-api = { workspace = true }
+sp-arithmetic = { workspace = true }
+sp-core = { workspace = true }
+sp-io = { workspace = true }
+sp-runtime = { workspace = true }
+sp-std = { workspace = true }
+
+[dev-dependencies]
+pallet-assets = { workspace = true }
+pallet-assets-freezer = { workspace = true }
+pallet-balances = { workspace = true }
+primitive-types = { workspace = true, features = ["codec", "num-traits", "scale-info"] }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-benchmarking?/std",
+	"frame-support/std",
+	"frame-system/std",
+	"pallet-assets-freezer/std",
+	"pallet-assets/std",
+	"pallet-balances/std",
+	"primitive-types/std",
+	"scale-info/std",
+	"sp-api/std",
+	"sp-arithmetic/std",
+	"sp-core/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-std/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"pallet-assets-freezer/runtime-benchmarks",
+	"pallet-assets/runtime-benchmarks",
+	"pallet-balances/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"pallet-assets-freezer/try-runtime",
+	"pallet-assets/try-runtime",
+	"pallet-balances/try-runtime",
+	"sp-runtime/try-runtime",
+]
diff --git a/substrate/frame/asset-rewards/src/benchmarking.rs b/substrate/frame/asset-rewards/src/benchmarking.rs
new file mode 100644
index 00000000000..5605804dd20
--- /dev/null
+++ b/substrate/frame/asset-rewards/src/benchmarking.rs
@@ -0,0 +1,355 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Asset Rewards pallet benchmarking.
+
+use super::*;
+use crate::Pallet as AssetRewards;
+use frame_benchmarking::{v2::*, whitelisted_caller, BenchmarkError};
+use frame_support::{
+	assert_ok,
+	traits::{
+		fungibles::{Create, Inspect, Mutate},
+		Consideration, EnsureOrigin, Footprint,
+	},
+};
+use frame_system::{pallet_prelude::BlockNumberFor, Pallet as System, RawOrigin};
+use sp_runtime::{traits::One, Saturating};
+use sp_std::prelude::*;
+
+/// Benchmark Helper
+pub trait BenchmarkHelper<AssetId> {
+	/// Returns the staked asset id.
+	///
+	/// If the asset does not exist, it will be created by the benchmark.
+	fn staked_asset() -> AssetId;
+	/// Returns the reward asset id.
+	///
+	/// If the asset does not exist, it will be created by the benchmark.
+	fn reward_asset() -> AssetId;
+}
+
+fn pool_expire<T: Config>() -> DispatchTime<BlockNumberFor<T>> {
+	DispatchTime::At(BlockNumberFor::<T>::from(100u32))
+}
+
+fn create_reward_pool<T: Config>() -> Result<T::RuntimeOrigin, BenchmarkError>
+where
+	T::Assets: Create<T::AccountId> + Mutate<T::AccountId>,
+{
+	let caller_origin =
+		T::CreatePoolOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+	let caller = T::CreatePoolOrigin::ensure_origin(caller_origin.clone()).unwrap();
+
+	let footprint = Footprint::from_mel::<(PoolId, PoolInfoFor<T>)>();
+	T::Consideration::ensure_successful(&caller, footprint);
+
+	let staked_asset = T::BenchmarkHelper::staked_asset();
+	let reward_asset = T::BenchmarkHelper::reward_asset();
+
+	let min_staked_balance =
+		T::Assets::minimum_balance(staked_asset.clone()).max(T::Balance::one());
+	if !T::Assets::asset_exists(staked_asset.clone()) {
+		assert_ok!(T::Assets::create(
+			staked_asset.clone(),
+			caller.clone(),
+			true,
+			min_staked_balance
+		));
+	}
+	let min_reward_balance =
+		T::Assets::minimum_balance(reward_asset.clone()).max(T::Balance::one());
+	if !T::Assets::asset_exists(reward_asset.clone()) {
+		assert_ok!(T::Assets::create(
+			reward_asset.clone(),
+			caller.clone(),
+			true,
+			min_reward_balance
+		));
+	}
+
+	assert_ok!(AssetRewards::<T>::create_pool(
+		caller_origin.clone(),
+		Box::new(staked_asset),
+		Box::new(reward_asset),
+		// reward rate per block
+		min_reward_balance,
+		pool_expire::<T>(),
+		Some(caller),
+	));
+
+	Ok(caller_origin)
+}
+
+fn mint_into<T: Config>(caller: &T::AccountId, asset: &T::AssetId) -> T::Balance
+where
+	T::Assets: Mutate<T::AccountId>,
+{
+	let min_balance = T::Assets::minimum_balance(asset.clone());
+	assert_ok!(T::Assets::mint_into(
+		asset.clone(),
+		&caller,
+		min_balance.saturating_mul(10u32.into())
+	));
+	min_balance
+}
+
+fn assert_last_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) {
+	System::<T>::assert_last_event(generic_event.into());
+}
+
+#[benchmarks(where T::Assets: Create<T::AccountId> + Mutate<T::AccountId>)]
+mod benchmarks {
+	use super::*;
+
+	#[benchmark]
+	fn create_pool() -> Result<(), BenchmarkError> {
+		let caller_origin =
+			T::CreatePoolOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
+		let caller = T::CreatePoolOrigin::ensure_origin(caller_origin.clone()).unwrap();
+
+		let footprint = Footprint::from_mel::<(PoolId, PoolInfoFor<T>)>();
+		T::Consideration::ensure_successful(&caller, footprint);
+
+		let staked_asset = T::BenchmarkHelper::staked_asset();
+		let reward_asset = T::BenchmarkHelper::reward_asset();
+
+		let min_balance = T::Assets::minimum_balance(staked_asset.clone()).max(T::Balance::one());
+		if !T::Assets::asset_exists(staked_asset.clone()) {
+			assert_ok!(T::Assets::create(staked_asset.clone(), caller.clone(), true, min_balance));
+		}
+		let min_balance = T::Assets::minimum_balance(reward_asset.clone()).max(T::Balance::one());
+		if !T::Assets::asset_exists(reward_asset.clone()) {
+			assert_ok!(T::Assets::create(reward_asset.clone(), caller.clone(), true, min_balance));
+		}
+
+		#[extrinsic_call]
+		_(
+			caller_origin as T::RuntimeOrigin,
+			Box::new(staked_asset.clone()),
+			Box::new(reward_asset.clone()),
+			min_balance,
+			pool_expire::<T>(),
+			Some(caller.clone()),
+		);
+
+		assert_last_event::<T>(
+			Event::PoolCreated {
+				creator: caller.clone(),
+				admin: caller,
+				staked_asset_id: staked_asset,
+				reward_asset_id: reward_asset,
+				reward_rate_per_block: min_balance,
+				expiry_block: pool_expire::<T>().evaluate(System::<T>::block_number()),
+				pool_id: 0,
+			}
+			.into(),
+		);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn stake() -> Result<(), BenchmarkError> {
+		create_reward_pool::<T>()?;
+
+		let staker: T::AccountId = whitelisted_caller();
+		let min_balance = mint_into::<T>(&staker, &T::BenchmarkHelper::staked_asset());
+
+		// stake first to get worst-case benchmark.
+		assert_ok!(AssetRewards::<T>::stake(
+			RawOrigin::Signed(staker.clone()).into(),
+			0,
+			min_balance
+		));
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(staker.clone()), 0, min_balance);
+
+		assert_last_event::<T>(Event::Staked { staker, pool_id: 0, amount: min_balance }.into());
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn unstake() -> Result<(), BenchmarkError> {
+		create_reward_pool::<T>()?;
+
+		let staker: T::AccountId = whitelisted_caller();
+		let min_balance = mint_into::<T>(&staker, &T::BenchmarkHelper::staked_asset());
+
+		assert_ok!(AssetRewards::<T>::stake(
+			RawOrigin::Signed(staker.clone()).into(),
+			0,
+			min_balance,
+		));
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(staker.clone()), 0, min_balance, None);
+
+		assert_last_event::<T>(
+			Event::Unstaked { caller: staker.clone(), staker, pool_id: 0, amount: min_balance }
+				.into(),
+		);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn harvest_rewards() -> Result<(), BenchmarkError> {
+		create_reward_pool::<T>()?;
+
+		let pool_acc = AssetRewards::<T>::pool_account_id(&0u32);
+		let min_reward_balance = mint_into::<T>(&pool_acc, &T::BenchmarkHelper::reward_asset());
+
+		let staker = whitelisted_caller();
+		let _ = mint_into::<T>(&staker, &T::BenchmarkHelper::staked_asset());
+		assert_ok!(AssetRewards::<T>::stake(
+			RawOrigin::Signed(staker.clone()).into(),
+			0,
+			T::Balance::one(),
+		));
+
+		System::<T>::set_block_number(System::<T>::block_number() + BlockNumberFor::<T>::one());
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(staker.clone()), 0, None);
+
+		assert_last_event::<T>(
+			Event::RewardsHarvested {
+				caller: staker.clone(),
+				staker,
+				pool_id: 0,
+				amount: min_reward_balance,
+			}
+			.into(),
+		);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn set_pool_reward_rate_per_block() -> Result<(), BenchmarkError> {
+		let caller_origin = create_reward_pool::<T>()?;
+
+		// stake first to get worst-case benchmark.
+		{
+			let staker: T::AccountId = whitelisted_caller();
+			let min_balance = mint_into::<T>(&staker, &T::BenchmarkHelper::staked_asset());
+
+			assert_ok!(AssetRewards::<T>::stake(RawOrigin::Signed(staker).into(), 0, min_balance));
+		}
+
+		let new_reward_rate_per_block =
+			T::Assets::minimum_balance(T::BenchmarkHelper::reward_asset()).max(T::Balance::one()) +
+				T::Balance::one();
+
+		#[extrinsic_call]
+		_(caller_origin as T::RuntimeOrigin, 0, new_reward_rate_per_block);
+
+		assert_last_event::<T>(
+			Event::PoolRewardRateModified { pool_id: 0, new_reward_rate_per_block }.into(),
+		);
+		Ok(())
+	}
+
+	#[benchmark]
+	fn set_pool_admin() -> Result<(), BenchmarkError> {
+		let caller_origin = create_reward_pool::<T>()?;
+		let new_admin: T::AccountId = whitelisted_caller();
+
+		#[extrinsic_call]
+		_(caller_origin as T::RuntimeOrigin, 0, new_admin.clone());
+
+		assert_last_event::<T>(Event::PoolAdminModified { pool_id: 0, new_admin }.into());
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn set_pool_expiry_block() -> Result<(), BenchmarkError> {
+		let create_origin = create_reward_pool::<T>()?;
+
+		// stake first to get worst-case benchmark.
+		{
+			let staker: T::AccountId = whitelisted_caller();
+			let min_balance = mint_into::<T>(&staker, &T::BenchmarkHelper::staked_asset());
+
+			assert_ok!(AssetRewards::<T>::stake(RawOrigin::Signed(staker).into(), 0, min_balance));
+		}
+
+		let new_expiry_block =
+			pool_expire::<T>().evaluate(System::<T>::block_number()) + BlockNumberFor::<T>::one();
+
+		#[extrinsic_call]
+		_(create_origin as T::RuntimeOrigin, 0, DispatchTime::At(new_expiry_block));
+
+		assert_last_event::<T>(
+			Event::PoolExpiryBlockModified { pool_id: 0, new_expiry_block }.into(),
+		);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn deposit_reward_tokens() -> Result<(), BenchmarkError> {
+		create_reward_pool::<T>()?;
+		let caller = whitelisted_caller();
+
+		let reward_asset = T::BenchmarkHelper::reward_asset();
+		let pool_acc = AssetRewards::<T>::pool_account_id(&0u32);
+		let min_balance = mint_into::<T>(&caller, &reward_asset);
+
+		let balance_before = T::Assets::balance(reward_asset.clone(), &pool_acc);
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), 0, min_balance);
+
+		let balance_after = T::Assets::balance(reward_asset, &pool_acc);
+
+		assert_eq!(balance_after, balance_before + min_balance);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn cleanup_pool() -> Result<(), BenchmarkError> {
+		let create_origin = create_reward_pool::<T>()?;
+		let caller = T::CreatePoolOrigin::ensure_origin(create_origin.clone()).unwrap();
+
+		// deposit reward tokens to get worst-case benchmark.
+		{
+			let caller = whitelisted_caller();
+			let reward_asset = T::BenchmarkHelper::reward_asset();
+			let min_balance = mint_into::<T>(&caller, &reward_asset);
+			assert_ok!(AssetRewards::<T>::deposit_reward_tokens(
+				RawOrigin::Signed(caller).into(),
+				0,
+				min_balance
+			));
+		}
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(caller), 0);
+
+		assert_last_event::<T>(Event::PoolCleanedUp { pool_id: 0 }.into());
+
+		Ok(())
+	}
+
+	impl_benchmark_test_suite!(AssetRewards, crate::mock::new_test_ext(), crate::mock::MockRuntime);
+}
diff --git a/substrate/frame/asset-rewards/src/lib.rs b/substrate/frame/asset-rewards/src/lib.rs
new file mode 100644
index 00000000000..4ce73e9febf
--- /dev/null
+++ b/substrate/frame/asset-rewards/src/lib.rs
@@ -0,0 +1,905 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # FRAME Staking Rewards Pallet
+//!
+//! Allows accounts to be rewarded for holding `fungible` asset/s, for example LP tokens.
+//!
+//! ## Overview
+//!
+//! Initiate an incentive program for a fungible asset by creating a new pool.
+//!
+//! During pool creation, a 'staking asset', 'reward asset', 'reward rate per block', 'expiry
+//! block', and 'admin' are specified.
+//!
+//! Once created, holders of the 'staking asset' can 'stake' them in a corresponding pool, which
+//! creates a Freeze on the asset.
+//!
+//! Once staked, rewards denominated in 'reward asset' begin accumulating to the staker,
+//! proportional to their share of the total staked tokens in the pool.
+//!
+//! Reward assets pending distribution are held in an account unique to each pool.
+//!
+//! Care should be taken by the pool operator to keep pool accounts adequately funded with the
+//! reward asset.
+//!
+//! The pool admin may increase reward rate per block, increase expiry block, and change admin.
+//!
+//! ## Disambiguation
+//!
+//! While this pallet shares some terminology with the `staking-pool` and similar native staking
+//! related pallets, it is distinct and is entirely unrelated to native staking.
+//!
+//! ## Permissioning
+//!
+//! Currently, pool creation and management are restricted to a configured Origin.
+//!
+//! Future iterations of this pallet may allow permissionless creation and management of pools.
+//!
+//! Note: The permissioned origin must return an AccountId. This can be achieved for any Origin by
+//! wrapping it with `EnsureSuccess`.
+//!
+//! ## Implementation Notes
+//!
+//! Internal logic functions such as `update_pool_and_staker_rewards` were deliberately written
+//! without side-effects.
+//!
+//! Storage interaction such as reads and writes are instead all performed in the top level
+//! pallet Call method, which while slightly more verbose, makes it easier to understand the
+//! code and reason about how storage reads and writes occur in the pallet.
+//!
+//! ## Rewards Algorithm
+//!
+//! The rewards algorithm is based on the Synthetix [StakingRewards.sol](https://github.com/Synthetixio/synthetix/blob/develop/contracts/StakingRewards.sol)
+//! smart contract.
+//!
+//! Rewards are calculated JIT (just-in-time), and all operations are O(1) making the approach
+//! scalable to many pools and stakers.
+//!
+//! ### Resources
+//!
+//! - [This video series](https://www.youtube.com/watch?v=6ZO5aYg1GI8), which walks through the math
+//!   of the algorithm.
+//! - [This dev.to article](https://dev.to/heymarkkop/understanding-sushiswaps-masterchef-staking-rewards-1m6f),
+//!   which explains the algorithm of the SushiSwap MasterChef staking. While not identical to the
+//!   Synthetix approach, they are quite similar.
+#![deny(missing_docs)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+pub use pallet::*;
+
+use codec::{Codec, Decode, Encode, MaxEncodedLen};
+use frame_support::{
+	traits::{
+		fungibles::{Inspect, Mutate},
+		schedule::DispatchTime,
+		tokens::Balance,
+	},
+	PalletId,
+};
+use frame_system::pallet_prelude::BlockNumberFor;
+use scale_info::TypeInfo;
+use sp_core::Get;
+use sp_runtime::{
+	traits::{MaybeDisplay, Zero},
+	DispatchError,
+};
+use sp_std::boxed::Box;
+
+#[cfg(feature = "runtime-benchmarks")]
+pub mod benchmarking;
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+mod weights;
+
+pub use weights::WeightInfo;
+
+/// Unique id type for each pool.
+pub type PoolId = u32;
+
+/// Multiplier to maintain precision when calculating rewards.
+pub(crate) const PRECISION_SCALING_FACTOR: u16 = 4096;
+
+/// Convenience alias for `PoolInfo`.
+pub type PoolInfoFor<T> = PoolInfo<
+	<T as frame_system::Config>::AccountId,
+	<T as Config>::AssetId,
+	<T as Config>::Balance,
+	BlockNumberFor<T>,
+>;
+
+/// The state of a staker in a pool.
+#[derive(Debug, Default, Clone, Decode, Encode, MaxEncodedLen, TypeInfo)]
+pub struct PoolStakerInfo<Balance> {
+	/// Amount of tokens staked.
+	amount: Balance,
+	/// Accumulated, unpaid rewards.
+	rewards: Balance,
+	/// Reward per token value at the time of the staker's last interaction with the contract.
+	reward_per_token_paid: Balance,
+}
+
+/// The state and configuration of an incentive pool.
+#[derive(Debug, Clone, Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)]
+pub struct PoolInfo<AccountId, AssetId, Balance, BlockNumber> {
+	/// The asset staked in this pool.
+	staked_asset_id: AssetId,
+	/// The asset distributed as rewards by this pool.
+	reward_asset_id: AssetId,
+	/// The amount of tokens rewarded per block.
+	reward_rate_per_block: Balance,
+	/// The block the pool will cease distributing rewards.
+	expiry_block: BlockNumber,
+	/// The account authorized to manage this pool.
+	admin: AccountId,
+	/// The total amount of tokens staked in this pool.
+	total_tokens_staked: Balance,
+	/// Total rewards accumulated per token, up to the `last_update_block`.
+	reward_per_token_stored: Balance,
+	/// Last block number the pool was updated.
+	last_update_block: BlockNumber,
+	/// The account that holds the pool's rewards.
+	account: AccountId,
+}
+
+sp_api::decl_runtime_apis! {
+	/// The runtime API for the asset rewards pallet.
+	pub trait AssetRewards<Cost: MaybeDisplay + Codec> {
+		/// Get the cost of creating a pool.
+		///
+		/// This is especially useful when the cost is dynamic.
+		fn pool_creation_cost() -> Cost;
+	}
+}
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::{
+		pallet_prelude::*,
+		traits::{
+			fungibles::MutateFreeze,
+			tokens::{AssetId, Fortitude, Preservation},
+			Consideration, Footprint,
+		},
+	};
+	use frame_system::pallet_prelude::*;
+	use sp_runtime::{
+		traits::{
+			AccountIdConversion, BadOrigin, EnsureAdd, EnsureAddAssign, EnsureDiv, EnsureMul,
+			EnsureSub, EnsureSubAssign,
+		},
+		DispatchResult,
+	};
+
+	#[pallet::pallet]
+	pub struct Pallet<T>(_);
+
+	/// A reason for the pallet placing a freeze on funds.
+	#[pallet::composite_enum]
+	pub enum FreezeReason {
+		/// Funds are staked in the pallet.
+		#[codec(index = 0)]
+		Staked,
+	}
+
+	/// A reason for the pallet placing a hold on funds.
+	#[pallet::composite_enum]
+	pub enum HoldReason {
+		/// Cost associated with storing pool information on-chain.
+		#[codec(index = 0)]
+		PoolCreation,
+	}
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config {
+		/// Overarching event type.
+		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+
+		/// The pallet's unique identifier, used to derive the pool's account ID.
+		///
+		/// The account ID is derived once during pool creation and stored in the storage.
+		#[pallet::constant]
+		type PalletId: Get<PalletId>;
+
+		/// Identifier for each type of asset.
+		type AssetId: AssetId + Member + Parameter;
+
+		/// The type in which the assets are measured.
+		type Balance: Balance + TypeInfo;
+
+		/// The origin with permission to create pools.
+		///
+		/// The Origin must return an AccountId.
+		type CreatePoolOrigin: EnsureOrigin<Self::RuntimeOrigin, Success = Self::AccountId>;
+
+		/// Registry of assets that can be configured to either stake for rewards, or be offered as
+		/// rewards for staking.
+		type Assets: Inspect<Self::AccountId, AssetId = Self::AssetId, Balance = Self::Balance>
+			+ Mutate<Self::AccountId>;
+
+		/// Freezer for the Assets.
+		type AssetsFreezer: MutateFreeze<
+			Self::AccountId,
+			Id = Self::RuntimeFreezeReason,
+			AssetId = Self::AssetId,
+			Balance = Self::Balance,
+		>;
+
+		/// The overarching freeze reason.
+		type RuntimeFreezeReason: From<FreezeReason>;
+
+		/// Means for associating a cost with the on-chain storage of pool information, which
+		/// is incurred by the pool creator.
+		///
+		/// The passed `Footprint` specifically accounts for the storage footprint of the pool's
+		/// information itself, excluding any potential storage footprint related to the stakers.
+		type Consideration: Consideration<Self::AccountId, Footprint>;
+
+		/// Weight information for extrinsics in this pallet.
+		type WeightInfo: WeightInfo;
+
+		/// Helper for benchmarking.
+		#[cfg(feature = "runtime-benchmarks")]
+		type BenchmarkHelper: benchmarking::BenchmarkHelper<Self::AssetId>;
+	}
+
+	/// State of pool stakers.
+	#[pallet::storage]
+	pub type PoolStakers<T: Config> = StorageDoubleMap<
+		_,
+		Blake2_128Concat,
+		PoolId,
+		Blake2_128Concat,
+		T::AccountId,
+		PoolStakerInfo<T::Balance>,
+	>;
+
+	/// State and configuration of each staking pool.
+	#[pallet::storage]
+	pub type Pools<T: Config> = StorageMap<_, Blake2_128Concat, PoolId, PoolInfoFor<T>>;
+
+	/// The cost associated with storing pool information on-chain which was incurred by the pool
+	/// creator.
+	///
+	/// This cost may be [`None`], as determined by [`Config::Consideration`].
+	#[pallet::storage]
+	pub type PoolCost<T: Config> =
+		StorageMap<_, Blake2_128Concat, PoolId, (T::AccountId, T::Consideration)>;
+
+	/// Stores the [`PoolId`] to use for the next pool.
+	///
+	/// Incremented when a new pool is created.
+	#[pallet::storage]
+	pub type NextPoolId<T: Config> = StorageValue<_, PoolId, ValueQuery>;
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config> {
+		/// An account staked some tokens in a pool.
+		Staked {
+			/// The account that staked assets.
+			staker: T::AccountId,
+			/// The pool.
+			pool_id: PoolId,
+			/// The staked asset amount.
+			amount: T::Balance,
+		},
+		/// An account unstaked some tokens from a pool.
+		Unstaked {
+			/// The account that signed transaction.
+			caller: T::AccountId,
+			/// The account that unstaked assets.
+			staker: T::AccountId,
+			/// The pool.
+			pool_id: PoolId,
+			/// The unstaked asset amount.
+			amount: T::Balance,
+		},
+		/// An account harvested some rewards.
+		RewardsHarvested {
+			/// The account that signed transaction.
+			caller: T::AccountId,
+		/// The staker whose rewards were harvested.
+			staker: T::AccountId,
+			/// The pool.
+			pool_id: PoolId,
+			/// The amount of harvested tokens.
+			amount: T::Balance,
+		},
+		/// A new reward pool was created.
+		PoolCreated {
+			/// The account that created the pool.
+			creator: T::AccountId,
+			/// The unique ID for the new pool.
+			pool_id: PoolId,
+			/// The staking asset.
+			staked_asset_id: T::AssetId,
+			/// The reward asset.
+			reward_asset_id: T::AssetId,
+			/// The initial reward rate per block.
+			reward_rate_per_block: T::Balance,
+			/// The block the pool will cease to accumulate rewards.
+			expiry_block: BlockNumberFor<T>,
+			/// The account allowed to modify the pool.
+			admin: T::AccountId,
+		},
+		/// A pool reward rate was modified by the admin.
+		PoolRewardRateModified {
+			/// The modified pool.
+			pool_id: PoolId,
+			/// The new reward rate per block.
+			new_reward_rate_per_block: T::Balance,
+		},
+		/// A pool admin was modified.
+		PoolAdminModified {
+			/// The modified pool.
+			pool_id: PoolId,
+			/// The new admin.
+			new_admin: T::AccountId,
+		},
+		/// A pool expiry block was modified by the admin.
+		PoolExpiryBlockModified {
+			/// The modified pool.
+			pool_id: PoolId,
+			/// The new expiry block.
+			new_expiry_block: BlockNumberFor<T>,
+		},
+		/// Pool information was cleared after its completion.
+		PoolCleanedUp {
+			/// The cleared pool.
+			pool_id: PoolId,
+		},
+	}
+
+	#[pallet::error]
+	pub enum Error<T> {
+		/// The staker does not have enough tokens to perform the operation.
+		NotEnoughTokens,
+		/// An operation was attempted on a non-existent pool.
+		NonExistentPool,
+		/// An operation was attempted for a non-existent staker.
+		NonExistentStaker,
+		/// An operation was attempted with a non-existent asset.
+		NonExistentAsset,
+		/// There was an error converting a block number.
+		BlockNumberConversionError,
+		/// The expiry block must be in the future.
+		ExpiryBlockMustBeInTheFuture,
+		/// Insufficient funds to create the freeze.
+		InsufficientFunds,
+		/// The expiry block can be only extended.
+		ExpiryCut,
+		/// The reward rate per block can be only increased.
+		RewardRateCut,
+		/// The pool still has staked tokens or rewards.
+		NonEmptyPool,
+	}
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
+		fn integrity_test() {
+			// The AccountId is at least 16 bytes to contain the unique PalletId.
+			let pool_id: PoolId = 1;
+			assert!(
+				<frame_support::PalletId as AccountIdConversion<T::AccountId>>::try_into_sub_account(
+					&T::PalletId::get(), pool_id,
+				)
+				.is_some()
+			);
+		}
+	}
+
+	/// Pallet's callable functions.
+	#[pallet::call(weight(<T as Config>::WeightInfo))]
+	impl<T: Config> Pallet<T> {
+		/// Create a new reward pool.
+		///
+		/// Parameters:
+		/// - `origin`: must be `Config::CreatePoolOrigin`;
+		/// - `staked_asset_id`: the asset to be staked in the pool;
+		/// - `reward_asset_id`: the asset to be distributed as rewards;
+		/// - `reward_rate_per_block`: the amount of reward tokens distributed per block;
+		/// - `expiry`: the block number at which the pool will cease to accumulate rewards. The
+		///   [`DispatchTime::After`] variant evaluated at the execution time.
+		/// - `admin`: the account allowed to extend the pool expiration, increase the rewards rate
+		///   and receive the unutilized reward tokens back after the pool completion. If `None`,
+		///   the caller is set as an admin.
+		#[pallet::call_index(0)]
+		pub fn create_pool(
+			origin: OriginFor<T>,
+			staked_asset_id: Box<T::AssetId>,
+			reward_asset_id: Box<T::AssetId>,
+			reward_rate_per_block: T::Balance,
+			expiry: DispatchTime<BlockNumberFor<T>>,
+			admin: Option<T::AccountId>,
+		) -> DispatchResult {
+			// Check the origin; `CreatePoolOrigin` resolves to the creator's account id.
+			let creator = T::CreatePoolOrigin::ensure_origin(origin)?;
+
+			// Ensure both the staked and the reward assets exist.
+			ensure!(
+				T::Assets::asset_exists(*staked_asset_id.clone()),
+				Error::<T>::NonExistentAsset
+			);
+			ensure!(
+				T::Assets::asset_exists(*reward_asset_id.clone()),
+				Error::<T>::NonExistentAsset
+			);
+
+			// Check the expiry block: evaluated relative to the current block, it must be strictly
+			// in the future.
+			let expiry_block = expiry.evaluate(frame_system::Pallet::<T>::block_number());
+			ensure!(
+				expiry_block > frame_system::Pallet::<T>::block_number(),
+				Error::<T>::ExpiryBlockMustBeInTheFuture
+			);
+
+			let pool_id = NextPoolId::<T>::get();
+
+			// Take a storage deposit from the creator for the pool's storage footprint; it is
+			// released when the pool is removed via `cleanup_pool`.
+			let footprint = Self::pool_creation_footprint();
+			let cost = T::Consideration::new(&creator, footprint)?;
+			PoolCost::<T>::insert(pool_id, (creator.clone(), cost));
+
+			// Default the admin to the creator when none was provided.
+			let admin = admin.unwrap_or(creator.clone());
+
+			// Create the pool.
+			let pool = PoolInfoFor::<T> {
+				staked_asset_id: *staked_asset_id.clone(),
+				reward_asset_id: *reward_asset_id.clone(),
+				reward_rate_per_block,
+				total_tokens_staked: 0u32.into(),
+				reward_per_token_stored: 0u32.into(),
+				last_update_block: 0u32.into(),
+				expiry_block,
+				admin: admin.clone(),
+				account: Self::pool_account_id(&pool_id),
+			};
+
+			// Insert it into storage.
+			Pools::<T>::insert(pool_id, pool);
+
+			// Bump the pool id counter; errors (instead of wrapping) on overflow.
+			NextPoolId::<T>::put(pool_id.ensure_add(1)?);
+
+			// Emit created event.
+			Self::deposit_event(Event::PoolCreated {
+				creator,
+				pool_id,
+				staked_asset_id: *staked_asset_id,
+				reward_asset_id: *reward_asset_id,
+				reward_rate_per_block,
+				expiry_block,
+				admin,
+			});
+
+			Ok(())
+		}
+
+		/// Stake additional tokens in a pool.
+		///
+		/// A freeze is placed on the staked tokens.
+		#[pallet::call_index(1)]
+		pub fn stake(origin: OriginFor<T>, pool_id: PoolId, amount: T::Balance) -> DispatchResult {
+			let staker = ensure_signed(origin)?;
+
+			// Always start by updating staker and pool rewards, so accrued rewards are settled
+			// against the stake as it was *before* this change.
+			let pool_info = Pools::<T>::get(pool_id).ok_or(Error::<T>::NonExistentPool)?;
+			let staker_info = PoolStakers::<T>::get(pool_id, &staker).unwrap_or_default();
+			let (mut pool_info, mut staker_info) =
+				Self::update_pool_and_staker_rewards(&pool_info, &staker_info)?;
+
+			// Freeze the newly staked tokens; fails if they cannot be frozen.
+			T::AssetsFreezer::increase_frozen(
+				pool_info.staked_asset_id.clone(),
+				&FreezeReason::Staked.into(),
+				&staker,
+				amount,
+			)?;
+
+			// Update Pools.
+			pool_info.total_tokens_staked.ensure_add_assign(amount)?;
+
+			Pools::<T>::insert(pool_id, pool_info);
+
+			// Update PoolStakers.
+			staker_info.amount.ensure_add_assign(amount)?;
+			PoolStakers::<T>::insert(pool_id, &staker, staker_info);
+
+			// Emit event.
+			Self::deposit_event(Event::Staked { staker, pool_id, amount });
+
+			Ok(())
+		}
+
+		/// Unstake tokens from a pool.
+		///
+		/// Removes the freeze on the staked tokens.
+		///
+		/// Parameters:
+		/// - origin: must be the `staker` if the pool is still active. Otherwise, any account.
+		/// - pool_id: the pool to unstake from.
+		/// - amount: the amount of tokens to unstake.
+		/// - staker: the account to unstake from. If `None`, the caller is used.
+		#[pallet::call_index(2)]
+		pub fn unstake(
+			origin: OriginFor<T>,
+			pool_id: PoolId,
+			amount: T::Balance,
+			staker: Option<T::AccountId>,
+		) -> DispatchResult {
+			let caller = ensure_signed(origin)?;
+			let staker = staker.unwrap_or(caller.clone());
+
+			// Always start by updating the pool rewards.
+			let pool_info = Pools::<T>::get(pool_id).ok_or(Error::<T>::NonExistentPool)?;
+			// While the pool is active only the staker themself may unstake; once the pool has
+			// expired, anyone may unstake on the staker's behalf.
+			let now = frame_system::Pallet::<T>::block_number();
+			ensure!(now > pool_info.expiry_block || caller == staker, BadOrigin);
+
+			let staker_info = PoolStakers::<T>::get(pool_id, &staker).unwrap_or_default();
+			let (mut pool_info, mut staker_info) =
+				Self::update_pool_and_staker_rewards(&pool_info, &staker_info)?;
+
+			// Check the staker has enough staked tokens.
+			ensure!(staker_info.amount >= amount, Error::<T>::NotEnoughTokens);
+
+			// Unfreeze staker assets.
+			T::AssetsFreezer::decrease_frozen(
+				pool_info.staked_asset_id.clone(),
+				&FreezeReason::Staked.into(),
+				&staker,
+				amount,
+			)?;
+
+			// Update Pools.
+			pool_info.total_tokens_staked.ensure_sub_assign(amount)?;
+			Pools::<T>::insert(pool_id, pool_info);
+
+			// Update PoolStakers.
+			staker_info.amount.ensure_sub_assign(amount)?;
+
+			// Remove the staker entry entirely once nothing is staked and no rewards remain
+			// unclaimed, so empty entries do not linger in storage.
+			if staker_info.amount.is_zero() && staker_info.rewards.is_zero() {
+				PoolStakers::<T>::remove(&pool_id, &staker);
+			} else {
+				PoolStakers::<T>::insert(&pool_id, &staker, staker_info);
+			}
+
+			// Emit event.
+			Self::deposit_event(Event::Unstaked { caller, staker, pool_id, amount });
+
+			Ok(())
+		}
+
+		/// Harvest unclaimed pool rewards.
+		///
+		/// Parameters:
+		/// - origin: must be the `staker` if the pool is still active. Otherwise, any account.
+		/// - pool_id: the pool to harvest from.
+		/// - staker: the account for which to harvest rewards. If `None`, the caller is used.
+		#[pallet::call_index(3)]
+		pub fn harvest_rewards(
+			origin: OriginFor<T>,
+			pool_id: PoolId,
+			staker: Option<T::AccountId>,
+		) -> DispatchResult {
+			let caller = ensure_signed(origin)?;
+			let staker = staker.unwrap_or(caller.clone());
+
+			// Always start by updating the pool and staker rewards.
+			let pool_info = Pools::<T>::get(pool_id).ok_or(Error::<T>::NonExistentPool)?;
+			// While the pool is active only the staker themself may harvest; once the pool has
+			// expired, anyone may harvest on the staker's behalf.
+			let now = frame_system::Pallet::<T>::block_number();
+			ensure!(now > pool_info.expiry_block || caller == staker, BadOrigin);
+
+			let staker_info =
+				PoolStakers::<T>::get(pool_id, &staker).ok_or(Error::<T>::NonExistentStaker)?;
+			let (pool_info, mut staker_info) =
+				Self::update_pool_and_staker_rewards(&pool_info, &staker_info)?;
+
+			// Transfer unclaimed rewards from the pool to the staker.
+			T::Assets::transfer(
+				pool_info.reward_asset_id,
+				&pool_info.account,
+				&staker,
+				staker_info.rewards,
+				// Could kill the account, but only if the pool was already almost empty.
+				Preservation::Expendable,
+			)?;
+
+			// Emit event.
+			Self::deposit_event(Event::RewardsHarvested {
+				caller,
+				staker: staker.clone(),
+				pool_id,
+				amount: staker_info.rewards,
+			});
+
+			// Reset staker rewards.
+			staker_info.rewards = 0u32.into();
+
+			// Remove the staker entry entirely if nothing remains staked; otherwise persist the
+			// zeroed rewards.
+			if staker_info.amount.is_zero() {
+				PoolStakers::<T>::remove(&pool_id, &staker);
+			} else {
+				PoolStakers::<T>::insert(&pool_id, &staker, staker_info);
+			}
+
+			Ok(())
+		}
+
+		/// Modify a pool reward rate.
+		///
+		/// Currently the reward rate can only be increased.
+		///
+		/// Only the pool admin may perform this operation.
+		#[pallet::call_index(4)]
+		pub fn set_pool_reward_rate_per_block(
+			origin: OriginFor<T>,
+			pool_id: PoolId,
+			new_reward_rate_per_block: T::Balance,
+		) -> DispatchResult {
+			// Accept either the permissioned pool-creation origin (resolved to its account id) or
+			// a plain signed origin; either way, the resolved account must be the pool admin.
+			let caller = T::CreatePoolOrigin::ensure_origin(origin.clone())
+				.or_else(|_| ensure_signed(origin))?;
+
+			let pool_info = Pools::<T>::get(pool_id).ok_or(Error::<T>::NonExistentPool)?;
+			ensure!(pool_info.admin == caller, BadOrigin);
+			// The rate may only go up; lowering it would retroactively devalue active stakes.
+			ensure!(
+				new_reward_rate_per_block > pool_info.reward_rate_per_block,
+				Error::<T>::RewardRateCut
+			);
+
+			// Always start by updating the pool rewards, so rewards accrued so far are settled at
+			// the old rate before the new one takes effect.
+			let rewards_per_token = Self::reward_per_token(&pool_info)?;
+			let mut pool_info = Self::update_pool_rewards(&pool_info, rewards_per_token)?;
+
+			pool_info.reward_rate_per_block = new_reward_rate_per_block;
+			Pools::<T>::insert(pool_id, pool_info);
+
+			Self::deposit_event(Event::PoolRewardRateModified {
+				pool_id,
+				new_reward_rate_per_block,
+			});
+
+			Ok(())
+		}
+
+		/// Modify a pool admin.
+		///
+		/// Only the pool admin may perform this operation.
+		#[pallet::call_index(5)]
+		pub fn set_pool_admin(
+			origin: OriginFor<T>,
+			pool_id: PoolId,
+			new_admin: T::AccountId,
+		) -> DispatchResult {
+			// Accept either the permissioned pool-creation origin (resolved to its account id) or
+			// a plain signed origin; either way, the resolved account must be the pool admin.
+			let caller = T::CreatePoolOrigin::ensure_origin(origin.clone())
+				.or_else(|_| ensure_signed(origin))?;
+
+			let mut pool_info = Pools::<T>::get(pool_id).ok_or(Error::<T>::NonExistentPool)?;
+			ensure!(pool_info.admin == caller, BadOrigin);
+
+			pool_info.admin = new_admin.clone();
+			Pools::<T>::insert(pool_id, pool_info);
+
+			Self::deposit_event(Event::PoolAdminModified { pool_id, new_admin });
+
+			Ok(())
+		}
+
+		/// Set when the pool should expire.
+		///
+		/// Currently the expiry block can only be extended.
+		///
+		/// Only the pool admin may perform this operation.
+		#[pallet::call_index(6)]
+		pub fn set_pool_expiry_block(
+			origin: OriginFor<T>,
+			pool_id: PoolId,
+			new_expiry: DispatchTime<BlockNumberFor<T>>,
+		) -> DispatchResult {
+			// Accept either the permissioned pool-creation origin (resolved to its account id) or
+			// a plain signed origin; either way, the resolved account must be the pool admin.
+			let caller = T::CreatePoolOrigin::ensure_origin(origin.clone())
+				.or_else(|_| ensure_signed(origin))?;
+
+			// Evaluate the new expiry relative to the current block; it must be in the future.
+			let new_expiry = new_expiry.evaluate(frame_system::Pallet::<T>::block_number());
+			ensure!(
+				new_expiry > frame_system::Pallet::<T>::block_number(),
+				Error::<T>::ExpiryBlockMustBeInTheFuture
+			);
+
+			let pool_info = Pools::<T>::get(pool_id).ok_or(Error::<T>::NonExistentPool)?;
+			ensure!(pool_info.admin == caller, BadOrigin);
+			// The expiry may only be extended, never brought forward.
+			ensure!(new_expiry > pool_info.expiry_block, Error::<T>::ExpiryCut);
+
+			// Always start by updating the pool rewards, so rewards are settled against the old
+			// expiry before the extension takes effect.
+			let reward_per_token = Self::reward_per_token(&pool_info)?;
+			let mut pool_info = Self::update_pool_rewards(&pool_info, reward_per_token)?;
+
+			pool_info.expiry_block = new_expiry;
+			Pools::<T>::insert(pool_id, pool_info);
+
+			Self::deposit_event(Event::PoolExpiryBlockModified {
+				pool_id,
+				new_expiry_block: new_expiry,
+			});
+
+			Ok(())
+		}
+
+		/// Convenience method to deposit reward tokens into a pool.
+		///
+		/// This method is not strictly necessary (tokens could be transferred directly to the
+		/// pool pot address), but is provided for convenience so manual derivation of the
+		/// account id is not required.
+		#[pallet::call_index(7)]
+		pub fn deposit_reward_tokens(
+			origin: OriginFor<T>,
+			pool_id: PoolId,
+			amount: T::Balance,
+		) -> DispatchResult {
+			let caller = ensure_signed(origin)?;
+			let pool_info = Pools::<T>::get(pool_id).ok_or(Error::<T>::NonExistentPool)?;
+			// `Preserve` keeps the depositor's account alive: the transfer fails rather than
+			// reaping the caller.
+			T::Assets::transfer(
+				pool_info.reward_asset_id,
+				&caller,
+				&pool_info.account,
+				amount,
+				Preservation::Preserve,
+			)?;
+			Ok(())
+		}
+
+		/// Cleanup a pool.
+		///
+		/// Origin must be the pool admin.
+		///
+		/// Cleanup storage, release any associated storage cost and return the remaining reward
+		/// tokens to the admin.
+		#[pallet::call_index(8)]
+		pub fn cleanup_pool(origin: OriginFor<T>, pool_id: PoolId) -> DispatchResult {
+			let who = ensure_signed(origin)?;
+
+			let pool_info = Pools::<T>::get(pool_id).ok_or(Error::<T>::NonExistentPool)?;
+			ensure!(pool_info.admin == who, BadOrigin);
+
+			// The pool may only be removed once every staker has fully exited.
+			let stakers = PoolStakers::<T>::iter_key_prefix(pool_id).next();
+			ensure!(stakers.is_none(), Error::<T>::NonEmptyPool);
+
+			// Sweep whatever reward balance is still reducible in the pool account back to the
+			// admin; `Expendable` allows the pool account to be emptied (and reaped).
+			let pool_balance = T::Assets::reducible_balance(
+				pool_info.reward_asset_id.clone(),
+				&pool_info.account,
+				Preservation::Expendable,
+				Fortitude::Polite,
+			);
+			T::Assets::transfer(
+				pool_info.reward_asset_id,
+				&pool_info.account,
+				&pool_info.admin,
+				pool_balance,
+				Preservation::Expendable,
+			)?;
+
+			// Release the storage deposit taken at pool creation back to the original creator.
+			if let Some((who, cost)) = PoolCost::<T>::take(pool_id) {
+				T::Consideration::drop(cost, &who)?;
+			}
+
+			Pools::<T>::remove(pool_id);
+
+			Self::deposit_event(Event::PoolCleanedUp { pool_id });
+
+			Ok(())
+		}
+	}
+
+	impl<T: Config> Pallet<T> {
+		/// The pool creation footprint.
+		///
+		/// The footprint specifically accounts for the storage footprint of the pool's information
+		/// itself, excluding any potential storage footprint related to the stakers.
+		pub fn pool_creation_footprint() -> Footprint {
+			Footprint::from_mel::<(PoolId, PoolInfoFor<T>)>()
+		}
+
+		/// Derive a pool account ID from the pool's ID.
+		pub fn pool_account_id(id: &PoolId) -> T::AccountId {
+			T::PalletId::get().into_sub_account_truncating(id)
+		}
+
+		/// Computes the updated pool and staker reward state.
+		///
+		/// Should be called prior to any operation involving a staker.
+		///
+		/// Returns the updated pool and staker info.
+		///
+		/// NOTE: this function has no side-effects. Side-effects such as storage modifications are
+		/// the responsibility of the caller.
+		pub fn update_pool_and_staker_rewards(
+			pool_info: &PoolInfoFor<T>,
+			staker_info: &PoolStakerInfo<T::Balance>,
+		) -> Result<(PoolInfoFor<T>, PoolStakerInfo<T::Balance>), DispatchError> {
+			let reward_per_token = Self::reward_per_token(&pool_info)?;
+			let pool_info = Self::update_pool_rewards(pool_info, reward_per_token)?;
+
+			let mut new_staker_info = staker_info.clone();
+			new_staker_info.rewards = Self::derive_rewards(&staker_info, &reward_per_token)?;
+			new_staker_info.reward_per_token_paid = pool_info.reward_per_token_stored;
+			return Ok((pool_info, new_staker_info));
+		}
+
+		/// Computes the updated pool reward state.
+		///
+		/// Should be called every time the pool is adjusted, and a staker is not involved.
+		///
+		/// Returns the updated pool info.
+		///
+		/// NOTE: this function has no side-effects. Side-effects such as storage modifications are
+		/// the responsibility of the caller.
+		pub fn update_pool_rewards(
+			pool_info: &PoolInfoFor<T>,
+			reward_per_token: T::Balance,
+		) -> Result<PoolInfoFor<T>, DispatchError> {
+			let mut new_pool_info = pool_info.clone();
+			new_pool_info.last_update_block = frame_system::Pallet::<T>::block_number();
+			new_pool_info.reward_per_token_stored = reward_per_token;
+
+			Ok(new_pool_info)
+		}
+
+		/// Derives the current reward per token for this pool.
+		fn reward_per_token(pool_info: &PoolInfoFor<T>) -> Result<T::Balance, DispatchError> {
+			// With nothing staked there is nobody to accrue to; keep the stored value unchanged.
+			if pool_info.total_tokens_staked.is_zero() {
+				return Ok(pool_info.reward_per_token_stored)
+			}
+
+			let rewardable_blocks_elapsed: u32 =
+				match Self::last_block_reward_applicable(pool_info.expiry_block)
+					.ensure_sub(pool_info.last_update_block)?
+					.try_into()
+				{
+					Ok(b) => b,
+					Err(_) => return Err(Error::<T>::BlockNumberConversionError.into()),
+				};
+
+			// Scale by `PRECISION_SCALING_FACTOR` to retain precision through the integer
+			// division; `derive_rewards` divides the factor back out.
+			Ok(pool_info.reward_per_token_stored.ensure_add(
+				pool_info
+					.reward_rate_per_block
+					.ensure_mul(rewardable_blocks_elapsed.into())?
+					.ensure_mul(PRECISION_SCALING_FACTOR.into())?
+					.ensure_div(pool_info.total_tokens_staked)?,
+			)?)
+		}
+
+		/// Derives the amount of rewards earned by a staker.
+		///
+		/// This is a helper function for `update_pool_and_staker_rewards` and should not be called
+		/// directly.
+		fn derive_rewards(
+			staker_info: &PoolStakerInfo<T::Balance>,
+			reward_per_token: &T::Balance,
+		) -> Result<T::Balance, DispatchError> {
+			Ok(staker_info
+				.amount
+				.ensure_mul(reward_per_token.ensure_sub(staker_info.reward_per_token_paid)?)?
+				.ensure_div(PRECISION_SCALING_FACTOR.into())?
+				.ensure_add(staker_info.rewards)?)
+		}
+
+		/// The most recent block for which rewards can accrue: the current block number, capped at
+		/// the pool's expiry block.
+		fn last_block_reward_applicable(pool_expiry_block: BlockNumberFor<T>) -> BlockNumberFor<T> {
+			let now = frame_system::Pallet::<T>::block_number();
+			if now < pool_expiry_block {
+				now
+			} else {
+				pool_expiry_block
+			}
+		}
+	}
+}
diff --git a/substrate/frame/asset-rewards/src/mock.rs b/substrate/frame/asset-rewards/src/mock.rs
new file mode 100644
index 00000000000..87c8a8a0dea
--- /dev/null
+++ b/substrate/frame/asset-rewards/src/mock.rs
@@ -0,0 +1,221 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Test environment for Asset Rewards pallet.
+
+use super::*;
+use crate as pallet_asset_rewards;
+use core::default::Default;
+use frame_support::{
+	construct_runtime, derive_impl,
+	instances::Instance1,
+	parameter_types,
+	traits::{
+		tokens::fungible::{HoldConsideration, NativeFromLeft, NativeOrWithId, UnionOf},
+		AsEnsureOriginWithArg, ConstU128, ConstU32, EnsureOrigin, LinearStoragePrice,
+	},
+	PalletId,
+};
+use frame_system::EnsureSigned;
+use sp_runtime::{traits::IdentityLookup, BuildStorage};
+
+#[cfg(feature = "runtime-benchmarks")]
+use self::benchmarking::BenchmarkHelper;
+
+type Block = frame_system::mocking::MockBlock<MockRuntime>;
+
+// Assemble the mock runtime from the system, balances, assets, assets-freezer and asset-rewards
+// pallets exercised by the tests.
+construct_runtime!(
+	pub enum MockRuntime
+	{
+		System: frame_system,
+		Balances: pallet_balances,
+		Assets: pallet_assets::<Instance1>,
+		AssetsFreezer: pallet_assets_freezer::<Instance1>,
+		StakingRewards: pallet_asset_rewards,
+	}
+);
+
+// Minimal system config: `u128` account ids with identity lookup and balances account data.
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for MockRuntime {
+	type AccountId = u128;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Block = Block;
+	type AccountData = pallet_balances::AccountData<u128>;
+}
+
+// Native balances; existential deposit of 100 and freeze support for the pallet's hold/freeze
+// reasons.
+impl pallet_balances::Config for MockRuntime {
+	type Balance = u128;
+	type DustRemoval = ();
+	type RuntimeEvent = RuntimeEvent;
+	type ExistentialDeposit = ConstU128<100>;
+	type AccountStore = System;
+	type WeightInfo = ();
+	type MaxLocks = ();
+	type MaxReserves = ConstU32<50>;
+	type ReserveIdentifier = [u8; 8];
+	type FreezeIdentifier = RuntimeFreezeReason;
+	type MaxFreezes = ConstU32<50>;
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	type DoneSlashHandler = ();
+}
+
+// Fungible assets with `u32` ids; the freezer is wired to the `AssetsFreezer` instance so staked
+// amounts can be frozen.
+impl pallet_assets::Config<Instance1> for MockRuntime {
+	type RuntimeEvent = RuntimeEvent;
+	type Balance = u128;
+	type RemoveItemsLimit = ConstU32<1000>;
+	type AssetId = u32;
+	type AssetIdParameter = u32;
+	type Currency = Balances;
+	type CreateOrigin = AsEnsureOriginWithArg<EnsureSigned<Self::AccountId>>;
+	type ForceOrigin = frame_system::EnsureRoot<Self::AccountId>;
+	type AssetDeposit = ConstU128<1>;
+	type AssetAccountDeposit = ConstU128<10>;
+	type MetadataDepositBase = ConstU128<1>;
+	type MetadataDepositPerByte = ConstU128<1>;
+	type ApprovalDeposit = ConstU128<1>;
+	type StringLimit = ConstU32<50>;
+	type Freezer = AssetsFreezer;
+	type Extra = ();
+	type WeightInfo = ();
+	type CallbackHandle = ();
+	pallet_assets::runtime_benchmarks_enabled! {
+		type BenchmarkHelper = ();
+	}
+}
+
+parameter_types! {
+	// Pallet id used to derive per-pool sub-accounts.
+	pub const StakingRewardsPalletId: PalletId = PalletId(*b"py/stkrd");
+	pub const Native: NativeOrWithId<u32> = NativeOrWithId::Native;
+	// Account that the permissioned (Root) pool-creation origin resolves to.
+	pub const PermissionedAccountId: u128 = 0;
+}
+
+/// Give the Root origin permission to create pools, resolving to [`PermissionedAccountId`].
+pub struct MockPermissionedOrigin;
+impl EnsureOrigin<RuntimeOrigin> for MockPermissionedOrigin {
+	type Success = <MockRuntime as frame_system::Config>::AccountId;
+
+	fn try_origin(origin: RuntimeOrigin) -> Result<Self::Success, RuntimeOrigin> {
+		match origin.clone().into() {
+			// Only Root succeeds; it is mapped onto the fixed permissioned account.
+			Ok(frame_system::RawOrigin::Root) => Ok(PermissionedAccountId::get()),
+			_ => Err(origin),
+		}
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn try_successful_origin() -> Result<RuntimeOrigin, ()> {
+		Ok(RuntimeOrigin::root())
+	}
+}
+
+/// Allow freezes for the `Assets` pallet instance.
+impl pallet_assets_freezer::Config<pallet_assets_freezer::Instance1> for MockRuntime {
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	type RuntimeEvent = RuntimeEvent;
+}
+
+// Unified views over the native currency plus the asset instances, used by the pallet for
+// transfers (`Assets`) and freezes (`AssetsFreezer`) respectively.
+pub type NativeAndAssets = UnionOf<Balances, Assets, NativeFromLeft, NativeOrWithId<u32>, u128>;
+
+pub type NativeAndAssetsFreezer =
+	UnionOf<Balances, AssetsFreezer, NativeFromLeft, NativeOrWithId<u32>, u128>;
+
+// Fixed asset ids handed to the benchmarks for the staked and reward assets.
+#[cfg(feature = "runtime-benchmarks")]
+pub struct AssetRewardsBenchmarkHelper;
+#[cfg(feature = "runtime-benchmarks")]
+impl BenchmarkHelper<NativeOrWithId<u32>> for AssetRewardsBenchmarkHelper {
+	fn staked_asset() -> NativeOrWithId<u32> {
+		NativeOrWithId::<u32>::WithId(101)
+	}
+	fn reward_asset() -> NativeOrWithId<u32> {
+		NativeOrWithId::<u32>::WithId(102)
+	}
+}
+
+parameter_types! {
+	// Hold reason under which the pool-creation storage deposit is taken.
+	pub const CreationHoldReason: RuntimeHoldReason =
+		RuntimeHoldReason::StakingRewards(pallet_asset_rewards::HoldReason::PoolCreation);
+}
+
+impl Config for MockRuntime {
+	type RuntimeEvent = RuntimeEvent;
+	type AssetId = NativeOrWithId<u32>;
+	type Balance = <Self as pallet_balances::Config>::Balance;
+	type Assets = NativeAndAssets;
+	type AssetsFreezer = NativeAndAssetsFreezer;
+	type PalletId = StakingRewardsPalletId;
+	type CreatePoolOrigin = MockPermissionedOrigin;
+	type WeightInfo = ();
+	type RuntimeFreezeReason = RuntimeFreezeReason;
+	// Pool creation holds a flat deposit of 100 (zero per-byte price), released on cleanup.
+	type Consideration = HoldConsideration<
+		u128,
+		Balances,
+		CreationHoldReason,
+		LinearStoragePrice<ConstU128<100>, ConstU128<0>, u128>,
+	>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type BenchmarkHelper = AssetRewardsBenchmarkHelper;
+}
+
+/// Build test externalities: assets 1/10/20 with funded accounts, funded native balances
+/// (including the account for pool 0), starting at block 1.
+pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
+	let mut t = frame_system::GenesisConfig::<MockRuntime>::default().build_storage().unwrap();
+
+	pallet_assets::GenesisConfig::<MockRuntime, Instance1> {
+		// Genesis assets: id, owner, is_sufficient, min_balance
+		// pub assets: Vec<(T::AssetId, T::AccountId, bool, T::Balance)>,
+		assets: vec![(1, 1, true, 1), (10, 1, true, 1), (20, 1, true, 1)],
+		// Genesis metadata: id, name, symbol, decimals
+		// pub metadata: Vec<(T::AssetId, Vec<u8>, Vec<u8>, u8)>,
+		metadata: vec![
+			(1, b"test".to_vec(), b"TST".to_vec(), 18),
+			(10, b"test10".to_vec(), b"T10".to_vec(), 18),
+			(20, b"test20".to_vec(), b"T20".to_vec(), 18),
+		],
+		// Genesis accounts: id, account_id, balance
+		// pub accounts: Vec<(T::AssetId, T::AccountId, T::Balance)>,
+		accounts: vec![
+			(1, 1, 10000),
+			(1, 2, 20000),
+			(1, 3, 30000),
+			(1, 4, 40000),
+			(1, 10, 40000),
+			(1, 20, 40000),
+		],
+		next_asset_id: None,
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
+
+	// Pre-computed account id for pool 0 — presumably `pool_account_id(&0)`; verify if the
+	// pallet id or derivation changes.
+	let pool_zero_account_id = 31086825966906540362769395565;
+	pallet_balances::GenesisConfig::<MockRuntime> {
+		balances: vec![
+			(0, 10000),
+			(1, 10000),
+			(2, 20000),
+			(3, 30000),
+			(4, 40000),
+			(10, 40000),
+			(20, 40000),
+			(pool_zero_account_id, 100_000), // Top up the default pool account id
+		],
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
+
+	let mut ext = sp_io::TestExternalities::new(t);
+	ext.execute_with(|| System::set_block_number(1));
+	ext
+}
diff --git a/substrate/frame/asset-rewards/src/tests.rs b/substrate/frame/asset-rewards/src/tests.rs
new file mode 100644
index 00000000000..399d6a54c93
--- /dev/null
+++ b/substrate/frame/asset-rewards/src/tests.rs
@@ -0,0 +1,1457 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{mock::*, *};
+use frame_support::{
+	assert_err, assert_noop, assert_ok, hypothetically,
+	traits::{
+		fungible,
+		fungible::NativeOrWithId,
+		fungibles,
+		tokens::{Fortitude, Preservation},
+	},
+};
+use sp_runtime::{traits::BadOrigin, ArithmeticError, TokenError};
+
+// Default pool parameters shared across the tests below.
+const DEFAULT_STAKED_ASSET_ID: NativeOrWithId<u32> = NativeOrWithId::<u32>::WithId(1);
+const DEFAULT_REWARD_ASSET_ID: NativeOrWithId<u32> = NativeOrWithId::<u32>::Native;
+const DEFAULT_REWARD_RATE_PER_BLOCK: u128 = 100;
+const DEFAULT_EXPIRE_AFTER: u64 = 200;
+const DEFAULT_ADMIN: u128 = 1;
+
+/// Creates a basic pool with values:
+/// - Staking asset: 1
+/// - Reward asset: Native
+/// - Reward rate per block: 100
+/// - Expiry: 200 blocks after creation
+/// - Admin: 1
+///
+/// Useful to reduce boilerplate in tests when it's not important to customise or reuse pool
+/// params.
+pub fn create_default_pool() {
+	assert_ok!(StakingRewards::create_pool(
+		RuntimeOrigin::root(),
+		Box::new(DEFAULT_STAKED_ASSET_ID.clone()),
+		Box::new(DEFAULT_REWARD_ASSET_ID.clone()),
+		DEFAULT_REWARD_RATE_PER_BLOCK,
+		DispatchTime::After(DEFAULT_EXPIRE_AFTER),
+		Some(DEFAULT_ADMIN)
+	));
+}
+
+/// The same as [`create_default_pool`], but with the admin set to the permissioned (creator)
+/// account, [`PermissionedAccountId`].
+pub fn create_default_pool_permissioned_admin() {
+	assert_ok!(StakingRewards::create_pool(
+		RuntimeOrigin::root(),
+		Box::new(DEFAULT_STAKED_ASSET_ID.clone()),
+		Box::new(DEFAULT_REWARD_ASSET_ID.clone()),
+		DEFAULT_REWARD_RATE_PER_BLOCK,
+		DispatchTime::After(DEFAULT_EXPIRE_AFTER),
+		Some(PermissionedAccountId::get()),
+	));
+}
+
+/// Assert — without committing any state, via `hypothetically!` — that harvesting now would pay
+/// `staker` exactly `expected_earned` of `reward_asset_id` from `pool_id`.
+fn assert_hypothetically_earned(
+	staker: u128,
+	expected_earned: u128,
+	pool_id: u32,
+	reward_asset_id: NativeOrWithId<u32>,
+) {
+	hypothetically!({
+		// Get the pre-harvest balance.
+		let balance_before: <MockRuntime as Config>::Balance =
+			<<MockRuntime as Config>::Assets>::balance(reward_asset_id.clone(), &staker);
+
+		// Harvest the rewards.
+		assert_ok!(StakingRewards::harvest_rewards(RuntimeOrigin::signed(staker), pool_id, None),);
+
+		// Sanity check: staker rewards are reset to 0 if some `amount` is still staked, otherwise
+		// the storage item removed.
+		if let Some(staker_pool) = PoolStakers::<MockRuntime>::get(pool_id, staker) {
+			assert!(staker_pool.rewards == 0);
+			assert!(staker_pool.amount > 0);
+		}
+
+		// Check that the staker has earned the expected amount.
+		let balance_after =
+			<<MockRuntime as Config>::Assets>::balance(reward_asset_id.clone(), &staker);
+		assert_eq!(balance_after - balance_before, expected_earned);
+	});
+}
+
+/// Drain all `StakingRewards` events emitted so far, resetting the system event store.
+fn events() -> Vec<Event<MockRuntime>> {
+	let result = System::events()
+		.into_iter()
+		.map(|r| r.event)
+		.filter_map(|e| {
+			if let mock::RuntimeEvent::StakingRewards(inner) = e {
+				Some(inner)
+			} else {
+				None
+			}
+		})
+		.collect();
+
+	// Reset so each assertion only sees events emitted since the previous call.
+	System::reset_events();
+
+	result
+}
+
+/// All pools currently in storage, as `(pool_id, pool_info)` pairs.
+fn pools() -> Vec<(u32, PoolInfo<u128, NativeOrWithId<u32>, u128, u64>)> {
+	Pools::<MockRuntime>::iter().collect()
+}
+
+mod create_pool {
+	use super::*;
+
+	#[test]
+	fn success() {
+		new_test_ext().execute_with(|| {
+			assert_eq!(NextPoolId::<MockRuntime>::get(), 0);
+
+			System::set_block_number(10);
+			let expected_expiry_block = DEFAULT_EXPIRE_AFTER + 10;
+
+			// Create a pool with default values, and no admin override so [`PermissionedAccountId`]
+			// is admin.
+			assert_ok!(StakingRewards::create_pool(
+				RuntimeOrigin::root(),
+				Box::new(DEFAULT_STAKED_ASSET_ID),
+				Box::new(DEFAULT_REWARD_ASSET_ID),
+				DEFAULT_REWARD_RATE_PER_BLOCK,
+				DispatchTime::After(DEFAULT_EXPIRE_AFTER),
+				Some(PermissionedAccountId::get())
+			));
+
+			// Event is emitted.
+			assert_eq!(
+				events(),
+				[Event::<MockRuntime>::PoolCreated {
+					creator: PermissionedAccountId::get(),
+					pool_id: 0,
+					staked_asset_id: DEFAULT_STAKED_ASSET_ID,
+					reward_asset_id: DEFAULT_REWARD_ASSET_ID,
+					reward_rate_per_block: DEFAULT_REWARD_RATE_PER_BLOCK,
+					expiry_block: expected_expiry_block,
+					admin: PermissionedAccountId::get(),
+				}]
+			);
+
+			// State is updated correctly.
+			assert_eq!(NextPoolId::<MockRuntime>::get(), 1);
+			assert_eq!(
+				pools(),
+				vec![(
+					0,
+					PoolInfo {
+						staked_asset_id: DEFAULT_STAKED_ASSET_ID,
+						reward_asset_id: DEFAULT_REWARD_ASSET_ID,
+						reward_rate_per_block: DEFAULT_REWARD_RATE_PER_BLOCK,
+						expiry_block: expected_expiry_block,
+						admin: PermissionedAccountId::get(),
+						total_tokens_staked: 0,
+						reward_per_token_stored: 0,
+						last_update_block: 0,
+						account: StakingRewards::pool_account_id(&0),
+					}
+				)]
+			);
+
+			// Create another pool with explicit admin and other overrides.
+			let admin = 2;
+			let staked_asset_id = NativeOrWithId::<u32>::WithId(10);
+			let reward_asset_id = NativeOrWithId::<u32>::WithId(20);
+			let reward_rate_per_block = 250;
+			let expiry_block = 500;
+			let expected_expiry_block = expiry_block + 10;
+			assert_ok!(StakingRewards::create_pool(
+				RuntimeOrigin::root(),
+				Box::new(staked_asset_id.clone()),
+				Box::new(reward_asset_id.clone()),
+				reward_rate_per_block,
+				DispatchTime::After(expiry_block),
+				Some(admin)
+			));
+
+			// Event is emitted.
+			assert_eq!(
+				events(),
+				[Event::<MockRuntime>::PoolCreated {
+					creator: PermissionedAccountId::get(),
+					pool_id: 1,
+					staked_asset_id: staked_asset_id.clone(),
+					reward_asset_id: reward_asset_id.clone(),
+					reward_rate_per_block,
+					admin,
+					expiry_block: expected_expiry_block,
+				}]
+			);
+
+			// State is updated correctly.
+			assert_eq!(NextPoolId::<MockRuntime>::get(), 2);
+			assert_eq!(
+				pools(),
+				vec![
+					(
+						0,
+						PoolInfo {
+							staked_asset_id: DEFAULT_STAKED_ASSET_ID,
+							reward_asset_id: DEFAULT_REWARD_ASSET_ID,
+							reward_rate_per_block: DEFAULT_REWARD_RATE_PER_BLOCK,
+							admin: PermissionedAccountId::get(),
+							expiry_block: DEFAULT_EXPIRE_AFTER + 10,
+							total_tokens_staked: 0,
+							reward_per_token_stored: 0,
+							last_update_block: 0,
+							account: StakingRewards::pool_account_id(&0),
+						}
+					),
+					(
+						1,
+						PoolInfo {
+							staked_asset_id,
+							reward_asset_id,
+							reward_rate_per_block,
+							admin,
+							total_tokens_staked: 0,
+							expiry_block: expected_expiry_block,
+							reward_per_token_stored: 0,
+							last_update_block: 0,
+							account: StakingRewards::pool_account_id(&1),
+						}
+					)
+				]
+			);
+		});
+	}
+
+	#[test]
+	fn success_same_assets() {
+		new_test_ext().execute_with(|| {
+			assert_eq!(NextPoolId::<MockRuntime>::get(), 0);
+
+			System::set_block_number(10);
+			let expected_expiry_block = DEFAULT_EXPIRE_AFTER + 10;
+
+			// Create a pool with the same staking and reward asset.
+			let asset = NativeOrWithId::<u32>::Native;
+			assert_ok!(StakingRewards::create_pool(
+				RuntimeOrigin::root(),
+				Box::new(asset.clone()),
+				Box::new(asset.clone()),
+				DEFAULT_REWARD_RATE_PER_BLOCK,
+				DispatchTime::After(DEFAULT_EXPIRE_AFTER),
+				Some(PermissionedAccountId::get())
+			));
+
+			// Event is emitted.
+			assert_eq!(
+				events(),
+				[Event::<MockRuntime>::PoolCreated {
+					creator: PermissionedAccountId::get(),
+					pool_id: 0,
+					staked_asset_id: asset.clone(),
+					reward_asset_id: asset.clone(),
+					reward_rate_per_block: DEFAULT_REWARD_RATE_PER_BLOCK,
+					expiry_block: expected_expiry_block,
+					admin: PermissionedAccountId::get(),
+				}]
+			);
+
+			// State is updated correctly.
+			assert_eq!(NextPoolId::<MockRuntime>::get(), 1);
+			assert_eq!(
+				pools(),
+				vec![(
+					0,
+					PoolInfo {
+						staked_asset_id: asset.clone(),
+						reward_asset_id: asset,
+						reward_rate_per_block: DEFAULT_REWARD_RATE_PER_BLOCK,
+						expiry_block: expected_expiry_block,
+						admin: PermissionedAccountId::get(),
+						total_tokens_staked: 0,
+						reward_per_token_stored: 0,
+						last_update_block: 0,
+						account: StakingRewards::pool_account_id(&0),
+					}
+				)]
+			);
+		})
+	}
+
+	#[test]
+	fn fails_for_non_existent_asset() {
+		new_test_ext().execute_with(|| {
+			let valid_asset = NativeOrWithId::<u32>::WithId(1);
+			let invalid_asset = NativeOrWithId::<u32>::WithId(200);
+
+			assert_err!(
+				StakingRewards::create_pool(
+					RuntimeOrigin::root(),
+					Box::new(valid_asset.clone()),
+					Box::new(invalid_asset.clone()),
+					10,
+					DispatchTime::After(10u64),
+					None
+				),
+				Error::<MockRuntime>::NonExistentAsset
+			);
+
+			assert_err!(
+				StakingRewards::create_pool(
+					RuntimeOrigin::root(),
+					Box::new(invalid_asset.clone()),
+					Box::new(valid_asset.clone()),
+					10,
+					DispatchTime::After(10u64),
+					None
+				),
+				Error::<MockRuntime>::NonExistentAsset
+			);
+
+			assert_err!(
+				StakingRewards::create_pool(
+					RuntimeOrigin::root(),
+					Box::new(invalid_asset.clone()),
+					Box::new(invalid_asset.clone()),
+					10,
+					DispatchTime::After(10u64),
+					None
+				),
+				Error::<MockRuntime>::NonExistentAsset
+			);
+		})
+	}
+
+	// Pool creation is permissioned: a plain signed origin is rejected with
+	// `BadOrigin` before any pool state is touched.
+	#[test]
+	fn fails_for_not_permissioned() {
+		new_test_ext().execute_with(|| {
+			let user = 100;
+			let staked_asset_id = NativeOrWithId::<u32>::Native;
+			let reward_asset_id = NativeOrWithId::<u32>::WithId(1);
+			let reward_rate_per_block = 100;
+			let expiry_block = 100u64;
+			assert_err!(
+				StakingRewards::create_pool(
+					RuntimeOrigin::signed(user),
+					Box::new(staked_asset_id.clone()),
+					Box::new(reward_asset_id.clone()),
+					reward_rate_per_block,
+					DispatchTime::After(expiry_block),
+					None
+				),
+				BadOrigin
+			);
+		});
+	}
+
+	// NOTE(review): despite the test name, `None` is passed as the admin; the
+	// assertions show the admin then defaults to `PermissionedAccountId` (the
+	// account behind the root/permissioned origin) — confirm this matches the
+	// intended "caller becomes admin" semantics.
+	#[test]
+	fn create_pool_with_caller_admin() {
+		new_test_ext().execute_with(|| {
+			assert_eq!(NextPoolId::<MockRuntime>::get(), 0);
+
+			System::set_block_number(10);
+			// `DispatchTime::After` is relative to the current block (10).
+			let expected_expiry_block = DEFAULT_EXPIRE_AFTER + 10;
+
+			assert_ok!(StakingRewards::create_pool(
+				RuntimeOrigin::root(),
+				Box::new(DEFAULT_STAKED_ASSET_ID),
+				Box::new(DEFAULT_REWARD_ASSET_ID),
+				DEFAULT_REWARD_RATE_PER_BLOCK,
+				DispatchTime::After(DEFAULT_EXPIRE_AFTER),
+				None,
+			));
+
+			assert_eq!(
+				events(),
+				[Event::<MockRuntime>::PoolCreated {
+					creator: PermissionedAccountId::get(),
+					pool_id: 0,
+					staked_asset_id: DEFAULT_STAKED_ASSET_ID,
+					reward_asset_id: DEFAULT_REWARD_ASSET_ID,
+					reward_rate_per_block: DEFAULT_REWARD_RATE_PER_BLOCK,
+					expiry_block: expected_expiry_block,
+					admin: PermissionedAccountId::get(),
+				}]
+			);
+
+			assert_eq!(Pools::<MockRuntime>::get(0).unwrap().admin, PermissionedAccountId::get());
+		});
+	}
+}
+
+// Tests for the `stake` extrinsic: staker/pool accounting, balance freezing,
+// and failure on missing pools or insufficient funds.
+mod stake {
+	use super::*;
+
+	#[test]
+	fn success() {
+		new_test_ext().execute_with(|| {
+			let user = 1;
+			create_default_pool();
+			let pool_id = 0;
+			let initial_balance = <Assets as fungibles::Inspect<u128>>::reducible_balance(
+				1,
+				&user,
+				Preservation::Expendable,
+				Fortitude::Force,
+			);
+
+			// User stakes tokens
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(user), pool_id, 1000));
+
+			// Check that the user's staked amount is updated
+			assert_eq!(PoolStakers::<MockRuntime>::get(pool_id, user).unwrap().amount, 1000);
+
+			// Event is emitted.
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::Staked { staker: user, amount: 1000, pool_id: 0 }
+			);
+
+			// Check that the pool's total tokens staked is updated
+			assert_eq!(Pools::<MockRuntime>::get(pool_id).unwrap().total_tokens_staked, 1000);
+
+			// Check user's frozen balance is updated
+			assert_eq!(
+				<Assets as fungibles::Inspect<u128>>::reducible_balance(
+					1,
+					&user,
+					Preservation::Expendable,
+					Fortitude::Force,
+				),
+				// - extra 1 for ed
+				initial_balance - 1000 - 1
+			);
+
+			// User stakes more tokens
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(user), pool_id, 500));
+
+			// Event is emitted.
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::Staked { staker: user, amount: 500, pool_id: 0 }
+			);
+
+			// Check that the user's staked amount is updated
+			assert_eq!(PoolStakers::<MockRuntime>::get(pool_id, user).unwrap().amount, 1000 + 500);
+
+			// Check that the pool's total tokens staked is updated
+			assert_eq!(Pools::<MockRuntime>::get(pool_id).unwrap().total_tokens_staked, 1000 + 500);
+
+			assert_eq!(
+				<Assets as fungibles::Inspect<u128>>::reducible_balance(
+					1,
+					&user,
+					Preservation::Expendable,
+					Fortitude::Force,
+				),
+				// - extra 1 for ed
+				initial_balance - 1500 - 1
+			);
+
+			// No further events: `events()` drains the captured events, and all
+			// emitted events were consumed by the checks above.
+			assert_eq!(events(), []);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_existent_pool() {
+		new_test_ext().execute_with(|| {
+			let user = 1;
+			assert_err!(
+				StakingRewards::stake(RuntimeOrigin::signed(user), 999, 1000),
+				Error::<MockRuntime>::NonExistentPool
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_insufficient_balance() {
+		new_test_ext().execute_with(|| {
+			let user = 1;
+			create_default_pool();
+			let pool_id = 0;
+			let initial_balance = <Assets as fungibles::Inspect<u128>>::reducible_balance(
+				1,
+				&user,
+				Preservation::Expendable,
+				Fortitude::Force,
+			);
+			// Staking one unit more than the reducible balance must fail at the
+			// token layer, before any pool state changes.
+			assert_err!(
+				StakingRewards::stake(RuntimeOrigin::signed(user), pool_id, initial_balance + 1),
+				TokenError::FundsUnavailable,
+			);
+		})
+	}
+}
+
+// Tests for the `unstake` extrinsic: partial/full unstaking, storage cleanup
+// when a staker's position reaches zero, and permissionless unstaking for a
+// third party once the pool has expired.
+mod unstake {
+	use super::*;
+
+	#[test]
+	fn success() {
+		new_test_ext().execute_with(|| {
+			let user = 1;
+			create_default_pool();
+			let pool_id = 0;
+
+			// User stakes tokens
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(user), pool_id, 1000));
+
+			// User unstakes tokens
+			assert_ok!(StakingRewards::unstake(RuntimeOrigin::signed(user), pool_id, 500, None));
+
+			// Event is emitted.
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::Unstaked {
+					caller: user,
+					staker: user,
+					amount: 500,
+					pool_id: 0
+				}
+			);
+
+			// Check that the user's staked amount is updated
+			assert_eq!(PoolStakers::<MockRuntime>::get(pool_id, user).unwrap().amount, 500);
+
+			// Check that the pool's total tokens staked is updated
+			assert_eq!(Pools::<MockRuntime>::get(pool_id).unwrap().total_tokens_staked, 500);
+
+			// User unstakes remaining tokens
+			assert_ok!(StakingRewards::unstake(RuntimeOrigin::signed(user), pool_id, 500, None));
+
+			// Check that the storage items is removed since stake amount and rewards are zero.
+			assert!(PoolStakers::<MockRuntime>::get(pool_id, user).is_none());
+
+			// Check that the pool's total tokens staked is zero
+			assert_eq!(Pools::<MockRuntime>::get(pool_id).unwrap().total_tokens_staked, 0);
+		});
+	}
+
+	// Unstaking on behalf of another account is only permitted after the pool
+	// has expired; before that only the staker themselves may unstake.
+	#[test]
+	fn unstake_for_other() {
+		new_test_ext().execute_with(|| {
+			let staker = 1;
+			let caller = 2;
+			let pool_id = 0;
+			let init_block = System::block_number();
+
+			create_default_pool();
+
+			// User stakes tokens
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker), pool_id, 1000));
+
+			// Fails to unstake for other since pool is still active
+			assert_noop!(
+				StakingRewards::unstake(RuntimeOrigin::signed(caller), pool_id, 500, Some(staker)),
+				BadOrigin,
+			);
+
+			// Advance past the pool's expiry block.
+			System::set_block_number(init_block + DEFAULT_EXPIRE_AFTER + 1);
+
+			assert_ok!(StakingRewards::unstake(
+				RuntimeOrigin::signed(caller),
+				pool_id,
+				500,
+				Some(staker)
+			));
+
+			// Event is emitted.
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::Unstaked { caller, staker, amount: 500, pool_id: 0 }
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_existent_pool() {
+		new_test_ext().execute_with(|| {
+			let user = 1;
+			let non_existent_pool_id = 999;
+
+			// User tries to unstake tokens from a non-existent pool
+			assert_err!(
+				StakingRewards::unstake(
+					RuntimeOrigin::signed(user),
+					non_existent_pool_id,
+					500,
+					None
+				),
+				Error::<MockRuntime>::NonExistentPool
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_insufficient_staked_amount() {
+		new_test_ext().execute_with(|| {
+			let user = 1;
+			create_default_pool();
+			let pool_id = 0;
+
+			// User stakes tokens
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(user), pool_id, 1000));
+
+			// User tries to unstake more tokens than they have staked
+			assert_err!(
+				StakingRewards::unstake(RuntimeOrigin::signed(user), pool_id, 1500, None),
+				Error::<MockRuntime>::NotEnoughTokens
+			);
+		});
+	}
+}
+
+// Tests for the `harvest_rewards` extrinsic: reward payout accounting,
+// harvesting on behalf of another staker after pool expiry, and failure cases.
+mod harvest_rewards {
+	use super::*;
+
+	#[test]
+	fn success() {
+		new_test_ext().execute_with(|| {
+			let staker = 1;
+			let pool_id = 0;
+			let reward_asset_id = NativeOrWithId::<u32>::Native;
+			create_default_pool();
+
+			// Stake
+			System::set_block_number(10);
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker), pool_id, 1000));
+
+			// Harvest after 10 blocks as the sole staker: the payout is the full
+			// per-block reward rate times the elapsed blocks.
+			System::set_block_number(20);
+			let balance_before: <MockRuntime as Config>::Balance =
+				<<MockRuntime as Config>::Assets>::balance(reward_asset_id.clone(), &staker);
+			assert_ok!(StakingRewards::harvest_rewards(
+				RuntimeOrigin::signed(staker),
+				pool_id,
+				None
+			));
+			let balance_after =
+				<<MockRuntime as Config>::Assets>::balance(reward_asset_id.clone(), &staker);
+
+			// Assert
+			assert_eq!(
+				balance_after - balance_before,
+				10 * Pools::<MockRuntime>::get(pool_id).unwrap().reward_rate_per_block
+			);
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::RewardsHarvested {
+					caller: staker,
+					staker,
+					pool_id,
+					amount: 10 * Pools::<MockRuntime>::get(pool_id).unwrap().reward_rate_per_block
+				}
+			);
+		});
+	}
+
+	// Harvesting on behalf of another staker is only permitted once the pool
+	// has expired; before that only the staker themselves may harvest.
+	#[test]
+	fn harvest_for_other() {
+		new_test_ext().execute_with(|| {
+			let caller = 2;
+			let staker = 1;
+			let pool_id = 0;
+			let init_block = System::block_number();
+
+			create_default_pool();
+
+			// Stake
+			System::set_block_number(10);
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker), pool_id, 1000));
+
+			System::set_block_number(20);
+
+			// Fails to harvest for staker since pool is still active
+			assert_noop!(
+				StakingRewards::harvest_rewards(
+					RuntimeOrigin::signed(caller),
+					pool_id,
+					Some(staker)
+				),
+				BadOrigin
+			);
+
+			System::set_block_number(init_block + DEFAULT_EXPIRE_AFTER + 1);
+
+			// Harvest for staker
+			assert_ok!(StakingRewards::harvest_rewards(
+				RuntimeOrigin::signed(caller),
+				pool_id,
+				Some(staker),
+			));
+
+			// Bind the event fields under fresh names: binding them as `caller`
+			// etc. would shadow the outer variables and reduce the guard to the
+			// always-true `x == x`, making the assertion vacuous.
+			assert!(matches!(
+				events().last().unwrap(),
+				Event::<MockRuntime>::RewardsHarvested {
+					caller: event_caller,
+					staker: event_staker,
+					pool_id: event_pool_id,
+					..
+				} if *event_caller == caller &&
+					*event_staker == staker &&
+					*event_pool_id == pool_id
+			));
+		});
+	}
+
+	#[test]
+	fn fails_for_non_existent_staker() {
+		new_test_ext().execute_with(|| {
+			let non_existent_staker = 999;
+
+			create_default_pool();
+			assert_err!(
+				StakingRewards::harvest_rewards(
+					RuntimeOrigin::signed(non_existent_staker),
+					0,
+					None
+				),
+				Error::<MockRuntime>::NonExistentStaker
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_existent_pool() {
+		new_test_ext().execute_with(|| {
+			let staker = 1;
+			let non_existent_pool_id = 999;
+
+			assert_err!(
+				StakingRewards::harvest_rewards(
+					RuntimeOrigin::signed(staker),
+					non_existent_pool_id,
+					None,
+				),
+				Error::<MockRuntime>::NonExistentPool
+			);
+		});
+	}
+}
+
+// Tests for the `set_pool_admin` extrinsic: both the current signed admin and
+// the permissioned (root) origin may change a pool's admin; others may not.
+mod set_pool_admin {
+	use super::*;
+
+	#[test]
+	fn success_signed_admin() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let new_admin = 2;
+			let pool_id = 0;
+			create_default_pool();
+
+			// Modify the pool admin
+			assert_ok!(StakingRewards::set_pool_admin(
+				RuntimeOrigin::signed(admin),
+				pool_id,
+				new_admin,
+			));
+
+			// Check state
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::PoolAdminModified { pool_id, new_admin }
+			);
+			assert_eq!(Pools::<MockRuntime>::get(pool_id).unwrap().admin, new_admin);
+		});
+	}
+
+	#[test]
+	fn success_permissioned_admin() {
+		new_test_ext().execute_with(|| {
+			let new_admin = 2;
+			let pool_id = 0;
+			create_default_pool_permissioned_admin();
+
+			// Modify the pool admin
+			assert_ok!(StakingRewards::set_pool_admin(RuntimeOrigin::root(), pool_id, new_admin));
+
+			// Check state
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::PoolAdminModified { pool_id, new_admin }
+			);
+			assert_eq!(Pools::<MockRuntime>::get(pool_id).unwrap().admin, new_admin);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_existent_pool() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let new_admin = 2;
+			let non_existent_pool_id = 999;
+
+			assert_err!(
+				StakingRewards::set_pool_admin(
+					RuntimeOrigin::signed(admin),
+					non_existent_pool_id,
+					new_admin
+				),
+				Error::<MockRuntime>::NonExistentPool
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_admin() {
+		new_test_ext().execute_with(|| {
+			let new_admin = 2;
+			let non_admin = 3;
+			let pool_id = 0;
+			create_default_pool();
+
+			assert_err!(
+				StakingRewards::set_pool_admin(
+					RuntimeOrigin::signed(non_admin),
+					pool_id,
+					new_admin
+				),
+				BadOrigin
+			);
+		});
+	}
+}
+
+// Tests for the `set_pool_expiry_block` extrinsic: admin/root may extend a
+// pool's expiry (never cut it back), and extending an already-expired pool
+// resumes reward accrual from the block of the extension.
+mod set_pool_expiry_block {
+	use super::*;
+
+	#[test]
+	fn success_permissioned_admin() {
+		new_test_ext().execute_with(|| {
+			let pool_id = 0;
+			let new_expiry_block = System::block_number() + DEFAULT_EXPIRE_AFTER + 1u64;
+			create_default_pool_permissioned_admin();
+
+			assert_ok!(StakingRewards::set_pool_expiry_block(
+				RuntimeOrigin::root(),
+				pool_id,
+				DispatchTime::At(new_expiry_block),
+			));
+
+			// Check state
+			assert_eq!(Pools::<MockRuntime>::get(pool_id).unwrap().expiry_block, new_expiry_block);
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::PoolExpiryBlockModified { pool_id, new_expiry_block }
+			);
+		});
+	}
+
+	#[test]
+	fn success_signed_admin() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let pool_id = 0;
+			let new_expiry_block = System::block_number() + DEFAULT_EXPIRE_AFTER + 1u64;
+			create_default_pool();
+
+			assert_ok!(StakingRewards::set_pool_expiry_block(
+				RuntimeOrigin::signed(admin),
+				pool_id,
+				DispatchTime::At(new_expiry_block)
+			));
+
+			// Check state
+			assert_eq!(Pools::<MockRuntime>::get(pool_id).unwrap().expiry_block, new_expiry_block);
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::PoolExpiryBlockModified { pool_id, new_expiry_block }
+			);
+		});
+	}
+
+	#[test]
+	fn extends_reward_accumulation() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let staker = 2;
+			let pool_id = 0;
+			let new_expiry_block = 300u64;
+			System::set_block_number(10);
+			create_default_pool();
+
+			// Regular reward accumulation
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker), pool_id, 1000));
+			System::set_block_number(20);
+			assert_hypothetically_earned(
+				staker,
+				DEFAULT_REWARD_RATE_PER_BLOCK * 10,
+				pool_id,
+				NativeOrWithId::<u32>::Native,
+			);
+
+			// Expiry was block 210, so earned 200 at block 250
+			System::set_block_number(250);
+			assert_hypothetically_earned(
+				staker,
+				DEFAULT_REWARD_RATE_PER_BLOCK * 200,
+				pool_id,
+				NativeOrWithId::<u32>::Native,
+			);
+
+			// Extend the expiry to block 300 while at block 250: the expired
+			// span (210..250) earns nothing, so accrual resumes for 50 more
+			// blocks from here.
+			assert_ok!(StakingRewards::set_pool_expiry_block(
+				RuntimeOrigin::signed(admin),
+				pool_id,
+				DispatchTime::At(new_expiry_block)
+			));
+			System::set_block_number(350);
+
+			// Staker has been in pool with rewards active for 250 blocks total
+			// (200 before the original expiry + 50 after the extension).
+			assert_hypothetically_earned(
+				staker,
+				DEFAULT_REWARD_RATE_PER_BLOCK * 250,
+				pool_id,
+				NativeOrWithId::<u32>::Native,
+			);
+		});
+	}
+
+	#[test]
+	fn fails_to_cutback_expiration() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let pool_id = 0;
+			create_default_pool();
+
+			// The new expiry (now + 30) is earlier than the default expiry, so
+			// the call must be rejected.
+			assert_noop!(
+				StakingRewards::set_pool_expiry_block(
+					RuntimeOrigin::signed(admin),
+					pool_id,
+					DispatchTime::After(30)
+				),
+				Error::<MockRuntime>::ExpiryCut
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_existent_pool() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let non_existent_pool_id = 999;
+			let new_expiry_block = 200u64;
+
+			assert_err!(
+				StakingRewards::set_pool_expiry_block(
+					RuntimeOrigin::signed(admin),
+					non_existent_pool_id,
+					DispatchTime::After(new_expiry_block)
+				),
+				Error::<MockRuntime>::NonExistentPool
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_admin() {
+		new_test_ext().execute_with(|| {
+			let non_admin = 2;
+			let pool_id = 0;
+			let new_expiry_block = 200u64;
+			create_default_pool();
+
+			assert_err!(
+				StakingRewards::set_pool_expiry_block(
+					RuntimeOrigin::signed(non_admin),
+					pool_id,
+					DispatchTime::After(new_expiry_block)
+				),
+				BadOrigin
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_expiry_block_in_the_past() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let pool_id = 0;
+			create_default_pool();
+			System::set_block_number(50);
+			assert_err!(
+				StakingRewards::set_pool_expiry_block(
+					RuntimeOrigin::signed(admin),
+					pool_id,
+					DispatchTime::At(40u64)
+				),
+				Error::<MockRuntime>::ExpiryBlockMustBeInTheFuture
+			);
+		});
+	}
+}
+
+// Tests for the `set_pool_reward_rate_per_block` extrinsic: admin/root may
+// raise the rate (never lower it), and past rewards are unaffected by the
+// change.
+mod set_pool_reward_rate_per_block {
+	use super::*;
+
+	#[test]
+	fn success_signed_admin() {
+		new_test_ext().execute_with(|| {
+			let pool_id = 0;
+			let new_reward_rate = 200;
+			create_default_pool();
+
+			// Pool Admin can modify
+			assert_ok!(StakingRewards::set_pool_reward_rate_per_block(
+				RuntimeOrigin::signed(DEFAULT_ADMIN),
+				pool_id,
+				new_reward_rate
+			));
+
+			// Check state
+			assert_eq!(
+				Pools::<MockRuntime>::get(pool_id).unwrap().reward_rate_per_block,
+				new_reward_rate
+			);
+
+			// Check event
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::PoolRewardRateModified {
+					pool_id,
+					new_reward_rate_per_block: new_reward_rate
+				}
+			);
+		});
+	}
+
+	#[test]
+	fn success_permissioned_admin() {
+		new_test_ext().execute_with(|| {
+			let pool_id = 0;
+			let new_reward_rate = 200;
+			create_default_pool_permissioned_admin();
+
+			// Root can modify
+			assert_ok!(StakingRewards::set_pool_reward_rate_per_block(
+				RuntimeOrigin::root(),
+				pool_id,
+				new_reward_rate
+			));
+
+			// Check state
+			assert_eq!(
+				Pools::<MockRuntime>::get(pool_id).unwrap().reward_rate_per_block,
+				new_reward_rate
+			);
+
+			// Check event
+			assert_eq!(
+				*events().last().unwrap(),
+				Event::<MockRuntime>::PoolRewardRateModified {
+					pool_id,
+					new_reward_rate_per_block: new_reward_rate
+				}
+			);
+		});
+	}
+
+	#[test]
+	fn staker_rewards_are_affected_correctly() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let staker = 2;
+			let pool_id = 0;
+			let new_reward_rate = 150;
+			create_default_pool();
+
+			// Stake some tokens, and accumulate 10 blocks of rewards at the default pool rate (100)
+			System::set_block_number(10);
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker), pool_id, 1000));
+			System::set_block_number(20);
+
+			// Increase the reward rate
+			assert_ok!(StakingRewards::set_pool_reward_rate_per_block(
+				RuntimeOrigin::signed(admin),
+				pool_id,
+				new_reward_rate
+			));
+
+			// Accumulate 10 blocks of rewards at the new rate
+			System::set_block_number(30);
+
+			// Check that rewards are calculated correctly with the updated rate:
+			// 10 blocks at the old rate plus 10 blocks at the new one.
+			assert_hypothetically_earned(
+				staker,
+				10 * 100 + 10 * new_reward_rate,
+				pool_id,
+				NativeOrWithId::<u32>::Native,
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_existent_pool() {
+		new_test_ext().execute_with(|| {
+			let admin = 1;
+			let non_existent_pool_id = 999;
+			let new_reward_rate = 200;
+
+			assert_err!(
+				StakingRewards::set_pool_reward_rate_per_block(
+					RuntimeOrigin::signed(admin),
+					non_existent_pool_id,
+					new_reward_rate
+				),
+				Error::<MockRuntime>::NonExistentPool
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_admin() {
+		new_test_ext().execute_with(|| {
+			let non_admin = 2;
+			let pool_id = 0;
+			let new_reward_rate = 200;
+			create_default_pool();
+
+			assert_err!(
+				StakingRewards::set_pool_reward_rate_per_block(
+					RuntimeOrigin::signed(non_admin),
+					pool_id,
+					new_reward_rate
+				),
+				BadOrigin
+			);
+		});
+	}
+
+	#[test]
+	fn fails_to_decrease() {
+		new_test_ext().execute_with(|| {
+			create_default_pool_permissioned_admin();
+
+			// Lowering the rate would retroactively disadvantage stakers, so it
+			// is rejected outright.
+			assert_noop!(
+				StakingRewards::set_pool_reward_rate_per_block(
+					RuntimeOrigin::root(),
+					0,
+					DEFAULT_REWARD_RATE_PER_BLOCK - 1
+				),
+				Error::<MockRuntime>::RewardRateCut
+			);
+		});
+	}
+}
+
+// Tests for the `deposit_reward_tokens` extrinsic: moving reward funds from a
+// depositor into the pool's derived account.
+mod deposit_reward_tokens {
+	use super::*;
+
+	#[test]
+	fn success() {
+		new_test_ext().execute_with(|| {
+			let depositor = 1;
+			let pool_id = 0;
+			let amount = 1000;
+			let reward_asset_id = NativeOrWithId::<u32>::Native;
+			create_default_pool();
+			let pool_account_id = StakingRewards::pool_account_id(&pool_id);
+
+			let depositor_balance_before =
+				<<MockRuntime as Config>::Assets>::balance(reward_asset_id.clone(), &depositor);
+			let pool_balance_before = <<MockRuntime as Config>::Assets>::balance(
+				reward_asset_id.clone(),
+				&pool_account_id,
+			);
+			assert_ok!(StakingRewards::deposit_reward_tokens(
+				RuntimeOrigin::signed(depositor),
+				pool_id,
+				amount
+			));
+			let depositor_balance_after =
+				<<MockRuntime as Config>::Assets>::balance(reward_asset_id.clone(), &depositor);
+			let pool_balance_after =
+				<<MockRuntime as Config>::Assets>::balance(reward_asset_id, &pool_account_id);
+
+			// The deposit is a plain transfer: pool gains exactly what the
+			// depositor loses.
+			assert_eq!(pool_balance_after - pool_balance_before, amount);
+			assert_eq!(depositor_balance_before - depositor_balance_after, amount);
+		});
+	}
+
+	#[test]
+	fn fails_for_non_existent_pool() {
+		new_test_ext().execute_with(|| {
+			assert_err!(
+				StakingRewards::deposit_reward_tokens(RuntimeOrigin::signed(1), 999, 100),
+				Error::<MockRuntime>::NonExistentPool
+			);
+		});
+	}
+
+	#[test]
+	fn fails_for_insufficient_balance() {
+		new_test_ext().execute_with(|| {
+			create_default_pool();
+			// 100_000_000 exceeds the depositor's balance from genesis.
+			assert_err!(
+				StakingRewards::deposit_reward_tokens(RuntimeOrigin::signed(1), 0, 100_000_000),
+				ArithmeticError::Underflow
+			);
+		});
+	}
+}
+
+// Tests for the `cleanup_pool` extrinsic: an admin can delete an empty pool,
+// reclaiming the pool account's funds and the storage cost; a pool with any
+// remaining stake cannot be cleaned up.
+mod cleanup_pool {
+	use super::*;
+
+	#[test]
+	fn success() {
+		new_test_ext().execute_with(|| {
+			let pool_id = 0;
+			let admin = DEFAULT_ADMIN;
+			let admin_balance_before = <Balances as fungible::Inspect<u128>>::balance(&admin);
+
+			create_default_pool();
+			assert!(Pools::<MockRuntime>::get(pool_id).is_some());
+
+			assert_ok!(StakingRewards::cleanup_pool(RuntimeOrigin::signed(admin), pool_id));
+
+			assert_eq!(
+				<Balances as fungible::Inspect<u128>>::balance(&admin),
+				// `100_000` initial pool account balance from Genesis config
+				admin_balance_before + 100_000,
+			);
+			// All pool-related storage must be gone.
+			assert_eq!(Pools::<MockRuntime>::get(pool_id), None);
+			assert_eq!(PoolStakers::<MockRuntime>::iter_prefix_values(pool_id).count(), 0);
+			assert_eq!(PoolCost::<MockRuntime>::get(pool_id), None);
+		});
+	}
+
+	#[test]
+	fn success_only_when_pool_empty() {
+		new_test_ext().execute_with(|| {
+			let pool_id = 0;
+			let staker = 20;
+			let admin = DEFAULT_ADMIN;
+
+			create_default_pool();
+
+			// stake to prevent pool cleanup
+			assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker), pool_id, 100));
+
+			assert_noop!(
+				StakingRewards::cleanup_pool(RuntimeOrigin::signed(admin), pool_id),
+				Error::<MockRuntime>::NonEmptyPool
+			);
+
+			// unstake partially
+			assert_ok!(StakingRewards::unstake(RuntimeOrigin::signed(staker), pool_id, 50, None));
+
+			assert_noop!(
+				StakingRewards::cleanup_pool(RuntimeOrigin::signed(admin), pool_id),
+				Error::<MockRuntime>::NonEmptyPool
+			);
+
+			// unstake all
+			assert_ok!(StakingRewards::unstake(RuntimeOrigin::signed(staker), pool_id, 50, None));
+
+			assert_ok!(StakingRewards::cleanup_pool(RuntimeOrigin::signed(admin), pool_id),);
+
+			assert_eq!(Pools::<MockRuntime>::get(pool_id), None);
+			assert_eq!(PoolStakers::<MockRuntime>::iter_prefix_values(pool_id).count(), 0);
+			assert_eq!(PoolCost::<MockRuntime>::get(pool_id), None);
+		});
+	}
+
+	#[test]
+	fn fails_on_wrong_origin() {
+		new_test_ext().execute_with(|| {
+			let caller = 888;
+			let pool_id = 0;
+			create_default_pool();
+
+			assert_noop!(
+				StakingRewards::cleanup_pool(RuntimeOrigin::signed(caller), pool_id),
+				BadOrigin
+			);
+		});
+	}
+}
+
+/// This integration test
+/// 1. Considers 2 stakers each staking and unstaking at different intervals, asserts their
+///    claimable rewards are adjusted as expected, and that harvesting works.
+/// 2. Checks that rewards are correctly halted after the pool's expiry block, and resume when the
+///    pool is extended.
+/// 3. Checks that reward rates adjustment works correctly.
+///
+/// Note: There are occasionally off by 1 errors due to rounding. In practice this is
+/// insignificant.
+#[test]
+fn integration() {
+	new_test_ext().execute_with(|| {
+		let admin = 1;
+		let staker1 = 10u128;
+		let staker2 = 20;
+		let staked_asset_id = NativeOrWithId::<u32>::WithId(1);
+		let reward_asset_id = NativeOrWithId::<u32>::Native;
+		let reward_rate_per_block = 100;
+		let lifetime = 24u64.into();
+		System::set_block_number(1);
+		// Pool is created at block 1 with `After(24)`, so it expires at block 25.
+		assert_ok!(StakingRewards::create_pool(
+			RuntimeOrigin::root(),
+			Box::new(staked_asset_id.clone()),
+			Box::new(reward_asset_id.clone()),
+			reward_rate_per_block,
+			DispatchTime::After(lifetime),
+			Some(admin)
+		));
+		let pool_id = 0;
+
+		// Block 7: Staker 1 stakes 100 tokens.
+		System::set_block_number(7);
+		assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker1), pool_id, 100));
+		// At this point
+		// - Staker 1 has earned 0 tokens.
+		// - Staker 1 is earning 100 tokens per block.
+
+		// Check that Staker 1 has earned 0 tokens.
+		assert_hypothetically_earned(staker1, 0, pool_id, reward_asset_id.clone());
+
+		// Block 9: Staker 2 stakes 100 tokens.
+		System::set_block_number(9);
+		assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker2), pool_id, 100));
+		// At this point
+		// - Staker 1 has earned 200 (100*2) tokens.
+		// - Staker 2 has earned 0 tokens.
+		// - Staker 1 is earning 50 tokens per block.
+		// - Staker 2 is earning 50 tokens per block.
+
+		// Check that Staker 1 has earned 200 tokens and Staker 2 has earned 0 tokens.
+		assert_hypothetically_earned(staker1, 200, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 0, pool_id, reward_asset_id.clone());
+
+		// Block 12: Staker 1 stakes an additional 100 tokens.
+		System::set_block_number(12);
+		assert_ok!(StakingRewards::stake(RuntimeOrigin::signed(staker1), pool_id, 100));
+		// At this point
+		// - Staker 1 has earned 350 (200 + (50 * 3)) tokens.
+		// - Staker 2 has earned 150 (50 * 3) tokens.
+		// - Staker 1 is earning 66.66 tokens per block.
+		// - Staker 2 is earning 33.33 tokens per block.
+
+		// Check that Staker 1 has earned 350 tokens and Staker 2 has earned 150 tokens.
+		assert_hypothetically_earned(staker1, 350, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 150, pool_id, reward_asset_id.clone());
+
+		// Block 22: Staker 1 unstakes 100 tokens.
+		System::set_block_number(22);
+		assert_ok!(StakingRewards::unstake(RuntimeOrigin::signed(staker1), pool_id, 100, None));
+		// - Staker 1 has earned 1016 (350 + 66.66 * 10) tokens.
+		// - Staker 2 has earned 483 (150 + 33.33 * 10) tokens.
+		// - Staker 1 is earning 50 tokens per block.
+		// - Staker 2 is earning 50 tokens per block.
+		assert_hypothetically_earned(staker1, 1016, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 483, pool_id, reward_asset_id.clone());
+
+		// Block 23: Staker 1 unstakes 100 tokens.
+		System::set_block_number(23);
+		assert_ok!(StakingRewards::unstake(RuntimeOrigin::signed(staker1), pool_id, 100, None));
+		// - Staker 1 has earned 1066 (1016 + 50) tokens.
+		// - Staker 2 has earned 533 (483 + 50) tokens.
+		// - Staker 1 is earning 0 tokens per block.
+		// - Staker 2 is earning 100 tokens per block.
+		assert_hypothetically_earned(staker1, 1066, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 533, pool_id, reward_asset_id.clone());
+
+		// Block 50: Stakers should only have earned 2 blocks worth of tokens (expiry is 25).
+		System::set_block_number(50);
+		// - Staker 1 has earned 1066 tokens.
+		// - Staker 2 has earned 733 (533 + 2 * 100) tokens.
+		// - Staker 1 is earning 0 tokens per block.
+		// - Staker 2 is earning 0 tokens per block.
+		assert_hypothetically_earned(staker1, 1066, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 733, pool_id, reward_asset_id.clone());
+
+		// Block 51: Extend the pool expiry block to 60.
+		System::set_block_number(51);
+		// - Staker 1 is earning 0 tokens per block.
+		// - Staker 2 is earning 100 tokens per block.
+		assert_ok!(StakingRewards::set_pool_expiry_block(
+			RuntimeOrigin::signed(admin),
+			pool_id,
+			DispatchTime::At(60u64),
+		));
+		assert_hypothetically_earned(staker1, 1066, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 733, pool_id, reward_asset_id.clone());
+
+		// Block 53: Check rewards are resumed.
+		// - Staker 1 has earned 1066 tokens.
+		// - Staker 2 has earned 933 (733 + 2 * 100) tokens.
+		// - Staker 2 is earning 100 tokens per block.
+		System::set_block_number(53);
+		assert_hypothetically_earned(staker1, 1066, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 933, pool_id, reward_asset_id.clone());
+
+		// Block 55: Increase the block reward.
+		// - Staker 1 has earned 1066 tokens.
+		// - Staker 2 has earned 1133 (933 + 2 * 100) tokens.
+		// - Staker 2 is earning 50 tokens per block.
+		System::set_block_number(55);
+		assert_ok!(StakingRewards::set_pool_reward_rate_per_block(
+			RuntimeOrigin::signed(admin),
+			pool_id,
+			150
+		));
+		assert_hypothetically_earned(staker1, 1066, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 1133, pool_id, reward_asset_id.clone());
+
+		// Block 57: Staker2 harvests their rewards.
+		System::set_block_number(57);
+		// - Staker 2 has earned 1433 (1133 + 2 * 150) tokens.
+		assert_hypothetically_earned(staker2, 1433, pool_id, reward_asset_id.clone());
+		// Get the pre-harvest balance.
+		let balance_before: <MockRuntime as Config>::Balance =
+			<<MockRuntime as Config>::Assets>::balance(reward_asset_id.clone(), &staker2);
+		assert_ok!(StakingRewards::harvest_rewards(RuntimeOrigin::signed(staker2), pool_id, None));
+		let balance_after =
+			<<MockRuntime as Config>::Assets>::balance(reward_asset_id.clone(), &staker2);
+		assert_eq!(balance_after - balance_before, 1433u128);
+
+		// Block 60: Check rewards were adjusted correctly.
+		// - Staker 1 has earned 1066 tokens.
+		// - Staker 2 has earned 450 (3 * 150) tokens.
+		System::set_block_number(60);
+		assert_hypothetically_earned(staker1, 1066, pool_id, reward_asset_id.clone());
+		assert_hypothetically_earned(staker2, 450, pool_id, reward_asset_id.clone());
+
+		// Finally, check events.
+		assert_eq!(
+			events(),
+			[
+				Event::PoolCreated {
+					creator: PermissionedAccountId::get(),
+					pool_id,
+					staked_asset_id,
+					reward_asset_id,
+					reward_rate_per_block: 100,
+					expiry_block: 25,
+					admin,
+				},
+				Event::Staked { staker: staker1, pool_id, amount: 100 },
+				Event::Staked { staker: staker2, pool_id, amount: 100 },
+				Event::Staked { staker: staker1, pool_id, amount: 100 },
+				Event::Unstaked { caller: staker1, staker: staker1, pool_id, amount: 100 },
+				Event::Unstaked { caller: staker1, staker: staker1, pool_id, amount: 100 },
+				Event::PoolExpiryBlockModified { pool_id, new_expiry_block: 60 },
+				Event::PoolRewardRateModified { pool_id, new_reward_rate_per_block: 150 },
+				Event::RewardsHarvested { caller: staker2, staker: staker2, pool_id, amount: 1433 }
+			]
+		);
+	});
+}
diff --git a/substrate/frame/asset-rewards/src/weights.rs b/substrate/frame/asset-rewards/src/weights.rs
new file mode 100644
index 00000000000..c9e2d0fd251
--- /dev/null
+++ b/substrate/frame/asset-rewards/src/weights.rs
@@ -0,0 +1,368 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_asset_rewards`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
+
+// Executed Command:
+// target/production/substrate-node
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_asset_rewards
+// --chain=dev
+// --header=./substrate/HEADER-APACHE2
+// --output=./substrate/frame/asset-rewards/src/weights.rs
+// --template=./substrate/.maintain/frame-weight-template.hbs
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use core::marker::PhantomData;
+
+/// Weight functions needed for `pallet_asset_rewards`.
+pub trait WeightInfo {
+	fn create_pool() -> Weight;
+	fn stake() -> Weight;
+	fn unstake() -> Weight;
+	fn harvest_rewards() -> Weight;
+	fn set_pool_reward_rate_per_block() -> Weight;
+	fn set_pool_admin() -> Weight;
+	fn set_pool_expiry_block() -> Weight;
+	fn deposit_reward_tokens() -> Weight;
+	fn cleanup_pool() -> Weight;
+}
+
+/// Weights for `pallet_asset_rewards` using the Substrate node and recommended hardware.
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// Storage: `Assets::Asset` (r:2 w:0)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::NextPoolId` (r:1 w:1)
+	/// Proof: `AssetRewards::NextPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(373), added: 2848, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolCost` (r:0 w:1)
+	/// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::Pools` (r:0 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	fn create_pool() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `495`
+		//  Estimated: `6360`
+		// Minimum execution time: 62_655_000 picoseconds.
+		Weight::from_parts(63_723_000, 6360)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(5_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:1 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1 w:0)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn stake() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `935`
+		//  Estimated: `3615`
+		// Minimum execution time: 54_463_000 picoseconds.
+		Weight::from_parts(55_974_000, 3615)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(4_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:1 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1 w:0)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn unstake() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `935`
+		//  Estimated: `3615`
+		// Minimum execution time: 55_749_000 picoseconds.
+		Weight::from_parts(57_652_000, 3615)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(4_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:0)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	fn harvest_rewards() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1021`
+		//  Estimated: `6208`
+		// Minimum execution time: 69_372_000 picoseconds.
+		Weight::from_parts(70_278_000, 6208)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(4_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	fn set_pool_reward_rate_per_block() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `347`
+		//  Estimated: `3615`
+		// Minimum execution time: 19_284_000 picoseconds.
+		Weight::from_parts(19_791_000, 3615)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	fn set_pool_admin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `347`
+		//  Estimated: `3615`
+		// Minimum execution time: 17_388_000 picoseconds.
+		Weight::from_parts(18_390_000, 3615)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	fn set_pool_expiry_block() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `347`
+		//  Estimated: `3615`
+		// Minimum execution time: 19_780_000 picoseconds.
+		Weight::from_parts(20_676_000, 3615)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:0)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn deposit_reward_tokens() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `840`
+		//  Estimated: `6208`
+		// Minimum execution time: 57_746_000 picoseconds.
+		Weight::from_parts(59_669_000, 6208)
+			.saturating_add(T::DbWeight::get().reads(5_u64))
+			.saturating_add(T::DbWeight::get().writes(4_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:0)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:2 w:2)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolCost` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(373), added: 2848, mode: `MaxEncodedLen`)
+	fn cleanup_pool() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1236`
+		//  Estimated: `6208`
+		// Minimum execution time: 110_443_000 picoseconds.
+		Weight::from_parts(113_149_000, 6208)
+			.saturating_add(T::DbWeight::get().reads(9_u64))
+			.saturating_add(T::DbWeight::get().writes(8_u64))
+	}
+}
+
+// For backwards compatibility and tests.
+impl WeightInfo for () {
+	/// Storage: `Assets::Asset` (r:2 w:0)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::NextPoolId` (r:1 w:1)
+	/// Proof: `AssetRewards::NextPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(373), added: 2848, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolCost` (r:0 w:1)
+	/// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::Pools` (r:0 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	fn create_pool() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `495`
+		//  Estimated: `6360`
+		// Minimum execution time: 62_655_000 picoseconds.
+		Weight::from_parts(63_723_000, 6360)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(5_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:1 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1 w:0)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn stake() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `935`
+		//  Estimated: `3615`
+		// Minimum execution time: 54_463_000 picoseconds.
+		Weight::from_parts(55_974_000, 3615)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(4_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::Freezes` (r:1 w:1)
+	/// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(105), added: 2580, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:1 w:0)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1)
+	/// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`)
+	fn unstake() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `935`
+		//  Estimated: `3615`
+		// Minimum execution time: 55_749_000 picoseconds.
+		Weight::from_parts(57_652_000, 3615)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(4_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:0)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	fn harvest_rewards() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1021`
+		//  Estimated: `6208`
+		// Minimum execution time: 69_372_000 picoseconds.
+		Weight::from_parts(70_278_000, 6208)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(4_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	fn set_pool_reward_rate_per_block() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `347`
+		//  Estimated: `3615`
+		// Minimum execution time: 19_284_000 picoseconds.
+		Weight::from_parts(19_791_000, 3615)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	fn set_pool_admin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `347`
+		//  Estimated: `3615`
+		// Minimum execution time: 17_388_000 picoseconds.
+		Weight::from_parts(18_390_000, 3615)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	fn set_pool_expiry_block() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `347`
+		//  Estimated: `3615`
+		// Minimum execution time: 19_780_000 picoseconds.
+		Weight::from_parts(20_676_000, 3615)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:0)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	fn deposit_reward_tokens() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `840`
+		//  Estimated: `6208`
+		// Minimum execution time: 57_746_000 picoseconds.
+		Weight::from_parts(59_669_000, 6208)
+			.saturating_add(RocksDbWeight::get().reads(5_u64))
+			.saturating_add(RocksDbWeight::get().writes(4_u64))
+	}
+	/// Storage: `AssetRewards::Pools` (r:1 w:1)
+	/// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(150), added: 2625, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolStakers` (r:1 w:0)
+	/// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Asset` (r:1 w:1)
+	/// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`)
+	/// Storage: `Assets::Account` (r:2 w:2)
+	/// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:2 w:2)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `AssetRewards::PoolCost` (r:1 w:1)
+	/// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(373), added: 2848, mode: `MaxEncodedLen`)
+	fn cleanup_pool() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1236`
+		//  Estimated: `6208`
+		// Minimum execution time: 110_443_000 picoseconds.
+		Weight::from_parts(113_149_000, 6208)
+			.saturating_add(RocksDbWeight::get().reads(9_u64))
+			.saturating_add(RocksDbWeight::get().writes(8_u64))
+	}
+}
diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs
index 728426cc84c..4a83c809a6a 100644
--- a/substrate/frame/support/src/traits.rs
+++ b/substrate/frame/support/src/traits.rs
@@ -96,8 +96,9 @@ mod storage;
 #[cfg(feature = "experimental")]
 pub use storage::MaybeConsideration;
 pub use storage::{
-	Consideration, Footprint, Incrementable, Instance, LinearStoragePrice, PartialStorageInfoTrait,
-	StorageInfo, StorageInfoTrait, StorageInstance, TrackedStorageKey, WhitelistedStorageKeys,
+	Consideration, ConstantStoragePrice, Footprint, Incrementable, Instance, LinearStoragePrice,
+	PartialStorageInfoTrait, StorageInfo, StorageInfoTrait, StorageInstance, TrackedStorageKey,
+	WhitelistedStorageKeys,
 };
 
 mod dispatch;
diff --git a/substrate/frame/support/src/traits/storage.rs b/substrate/frame/support/src/traits/storage.rs
index 2b8e4370738..676b73e03d3 100644
--- a/substrate/frame/support/src/traits/storage.rs
+++ b/substrate/frame/support/src/traits/storage.rs
@@ -200,6 +200,18 @@ where
 	}
 }
 
+/// Constant `Price` regardless of the given [`Footprint`].
+pub struct ConstantStoragePrice<Price, Balance>(PhantomData<(Price, Balance)>);
+impl<Price, Balance> Convert<Footprint, Balance> for ConstantStoragePrice<Price, Balance>
+where
+	Price: Get<Balance>,
+	Balance: From<u64> + sp_runtime::Saturating,
+{
+	fn convert(_: Footprint) -> Balance {
+		Price::get()
+	}
+}
+
 /// Some sort of cost taken from account temporarily in order to offset the cost to the chain of
 /// holding some data [`Footprint`] in state.
 ///
diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml
index 17a7c02e825..fc0b2d5a140 100644
--- a/umbrella/Cargo.toml
+++ b/umbrella/Cargo.toml
@@ -57,6 +57,7 @@ std = [
 	"pallet-asset-conversion-tx-payment?/std",
 	"pallet-asset-conversion?/std",
 	"pallet-asset-rate?/std",
+	"pallet-asset-rewards?/std",
 	"pallet-asset-tx-payment?/std",
 	"pallet-assets-freezer?/std",
 	"pallet-assets?/std",
@@ -256,6 +257,7 @@ runtime-benchmarks = [
 	"pallet-asset-conversion-tx-payment?/runtime-benchmarks",
 	"pallet-asset-conversion?/runtime-benchmarks",
 	"pallet-asset-rate?/runtime-benchmarks",
+	"pallet-asset-rewards?/runtime-benchmarks",
 	"pallet-asset-tx-payment?/runtime-benchmarks",
 	"pallet-assets-freezer?/runtime-benchmarks",
 	"pallet-assets?/runtime-benchmarks",
@@ -386,6 +388,7 @@ try-runtime = [
 	"pallet-asset-conversion-tx-payment?/try-runtime",
 	"pallet-asset-conversion?/try-runtime",
 	"pallet-asset-rate?/try-runtime",
+	"pallet-asset-rewards?/try-runtime",
 	"pallet-asset-tx-payment?/try-runtime",
 	"pallet-assets-freezer?/try-runtime",
 	"pallet-assets?/try-runtime",
@@ -543,7 +546,7 @@ with-tracing = [
 	"sp-tracing?/with-tracing",
 	"sp-tracing?/with-tracing",
 ]
-runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", 
"pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", 
"slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
+runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", 
"pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", 
"sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"]
 runtime = [
 	"frame-benchmarking",
 	"frame-benchmarking-pallet-pov",
@@ -870,6 +873,11 @@ default-features = false
 optional = true
 path = "../substrate/frame/asset-rate"
 
+[dependencies.pallet-asset-rewards]
+default-features = false
+optional = true
+path = "../substrate/frame/asset-rewards"
+
 [dependencies.pallet-asset-tx-payment]
 default-features = false
 optional = true
diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs
index 3504f081f29..a132f16a2c3 100644
--- a/umbrella/src/lib.rs
+++ b/umbrella/src/lib.rs
@@ -312,6 +312,10 @@ pub use pallet_asset_conversion_tx_payment;
 #[cfg(feature = "pallet-asset-rate")]
 pub use pallet_asset_rate;
 
+/// FRAME asset rewards pallet.
+#[cfg(feature = "pallet-asset-rewards")]
+pub use pallet_asset_rewards;
+
 /// pallet to manage transaction payments in assets.
 #[cfg(feature = "pallet-asset-tx-payment")]
 pub use pallet_asset_tx_payment;
-- 
GitLab


From 64abc745d9a7e7d6bea471e7bd2e895c503199c2 Mon Sep 17 00:00:00 2001
From: Giuseppe Re <giuseppe.re@parity.io>
Date: Thu, 16 Jan 2025 15:00:59 +0100
Subject: [PATCH 068/116] Update `parity-publish` to v0.10.4 (#7193)

The changes from v0.10.3 are only related to dependencies version. This
should fix some failing CIs.

This PR also updates the Rust cache version in CI.
---
 .github/workflows/check-semver.yml          | 2 +-
 .github/workflows/publish-check-compile.yml | 6 ++++--
 .github/workflows/publish-check-crates.yml  | 4 ++--
 .github/workflows/publish-claim-crates.yml  | 4 ++--
 4 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml
index 0da3e54ef60..43c70d6abc7 100644
--- a/.github/workflows/check-semver.yml
+++ b/.github/workflows/check-semver.yml
@@ -81,7 +81,7 @@ jobs:
       - name: install parity-publish
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         # Set the target dir to cache the build.
-        run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.3 --locked -q
+        run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.4 --locked -q
 
       - name: check semver
         if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
diff --git a/.github/workflows/publish-check-compile.yml b/.github/workflows/publish-check-compile.yml
index ce1b2cb231d..f20909106a8 100644
--- a/.github/workflows/publish-check-compile.yml
+++ b/.github/workflows/publish-check-compile.yml
@@ -26,12 +26,14 @@ jobs:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
 
       - name: Rust Cache
-        uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
+        uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7
         with:
           cache-on-failure: true
 
       - name: install parity-publish
-        run: cargo install parity-publish@0.10.3 --locked -q
+        run: |
+          rustup override set 1.82.0
+          cargo install parity-publish@0.10.4 --locked -q
 
       - name: parity-publish update plan
         run: parity-publish --color always plan --skip-check --prdoc prdoc/
diff --git a/.github/workflows/publish-check-crates.yml b/.github/workflows/publish-check-crates.yml
index 3150cb9dd40..c1b13243ba1 100644
--- a/.github/workflows/publish-check-crates.yml
+++ b/.github/workflows/publish-check-crates.yml
@@ -19,12 +19,12 @@ jobs:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
 
       - name: Rust Cache
-        uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
+        uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7
         with:
           cache-on-failure: true
 
       - name: install parity-publish
-        run: cargo install parity-publish@0.10.3 --locked -q
+        run: cargo install parity-publish@0.10.4 --locked -q
 
       - name: parity-publish check
         run: parity-publish --color always check --allow-unpublished
diff --git a/.github/workflows/publish-claim-crates.yml b/.github/workflows/publish-claim-crates.yml
index a6efc8a5599..804baf9ff06 100644
--- a/.github/workflows/publish-claim-crates.yml
+++ b/.github/workflows/publish-claim-crates.yml
@@ -13,12 +13,12 @@ jobs:
       - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
 
       - name: Rust Cache
-        uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
+        uses: Swatinem/rust-cache@f0deed1e0edfc6a9be95417288c0e1099b1eeec3 # v2.7.7
         with:
           cache-on-failure: true
 
       - name: install parity-publish
-        run: cargo install parity-publish@0.10.3 --locked -q
+        run: cargo install parity-publish@0.10.4 --locked -q
 
       - name: parity-publish claim
         env:
-- 
GitLab


From f7baa84f48aa72b96e8c9a9ec8a1934431de6709 Mon Sep 17 00:00:00 2001
From: Dastan <88332432+dastansam@users.noreply.github.com>
Date: Thu, 16 Jan 2025 21:12:41 +0600
Subject: [PATCH 069/116] [FRAME] `pallet_asset_tx_payment`: replace `AssetId`
 bound from `Copy` to `Clone` (#7194)

closes https://github.com/paritytech/polkadot-sdk/issues/6911
---
 prdoc/pr_7194.prdoc                               | 15 +++++++++++++++
 .../asset-tx-payment/src/lib.rs                   |  6 +++---
 .../asset-tx-payment/src/payment.rs               | 15 +++++++++------
 3 files changed, 27 insertions(+), 9 deletions(-)
 create mode 100644 prdoc/pr_7194.prdoc

diff --git a/prdoc/pr_7194.prdoc b/prdoc/pr_7194.prdoc
new file mode 100644
index 00000000000..3a9db46ceae
--- /dev/null
+++ b/prdoc/pr_7194.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[FRAME] `pallet_asset_tx_payment`: replace `AssetId` bound from `Copy` to `Clone`'
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      `OnChargeAssetTransaction`'s associated type `AssetId` is bounded by `Copy` which makes it impossible
+      to use `staging_xcm::v4::Location` as `AssetId`. This PR bounds `AssetId` to `Clone` instead, which is 
+      more lenient.
+
+crates:
+  - name: pallet-asset-tx-payment
+    bump: minor
diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs
index dd752989c36..4a96cbcacb5 100644
--- a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs
+++ b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs
@@ -202,7 +202,7 @@ where
 		debug_assert!(self.tip <= fee, "tip should be included in the computed fee");
 		if fee.is_zero() {
 			Ok((fee, InitialPayment::Nothing))
-		} else if let Some(asset_id) = self.asset_id {
+		} else if let Some(asset_id) = self.asset_id.clone() {
 			T::OnChargeAssetTransaction::withdraw_fee(
 				who,
 				call,
@@ -233,7 +233,7 @@ where
 		debug_assert!(self.tip <= fee, "tip should be included in the computed fee");
 		if fee.is_zero() {
 			Ok(())
-		} else if let Some(asset_id) = self.asset_id {
+		} else if let Some(asset_id) = self.asset_id.clone() {
 			T::OnChargeAssetTransaction::can_withdraw_fee(
 				who,
 				call,
@@ -358,7 +358,7 @@ where
 					tip,
 					who,
 					initial_payment,
-					asset_id: self.asset_id,
+					asset_id: self.asset_id.clone(),
 					weight: self.weight(call),
 				})
 			},
diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs
index 2074b1476f4..7b7ae855bf8 100644
--- a/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs
+++ b/substrate/frame/transaction-payment/asset-tx-payment/src/payment.rs
@@ -40,7 +40,7 @@ pub trait OnChargeAssetTransaction<T: Config> {
 	/// The underlying integer type in which fees are calculated.
 	type Balance: Balance;
 	/// The type used to identify the assets used for transaction payment.
-	type AssetId: FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo;
+	type AssetId: FullCodec + Clone + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo;
 	/// The type used to store the intermediate values between pre- and post-dispatch.
 	type LiquidityInfo;
 
@@ -112,7 +112,7 @@ where
 	T: Config,
 	CON: ConversionToAssetBalance<BalanceOf<T>, AssetIdOf<T>, AssetBalanceOf<T>>,
 	HC: HandleCredit<T::AccountId, T::Fungibles>,
-	AssetIdOf<T>: FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo,
+	AssetIdOf<T>: FullCodec + Clone + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo,
 {
 	type Balance = BalanceOf<T>;
 	type AssetId = AssetIdOf<T>;
@@ -133,11 +133,14 @@ where
 		// less than one (e.g. 0.5) but gets rounded down by integer division we introduce a minimum
 		// fee.
 		let min_converted_fee = if fee.is_zero() { Zero::zero() } else { One::one() };
-		let converted_fee = CON::to_asset_balance(fee, asset_id)
+		let converted_fee = CON::to_asset_balance(fee, asset_id.clone())
 			.map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))?
 			.max(min_converted_fee);
-		let can_withdraw =
-			<T::Fungibles as Inspect<T::AccountId>>::can_withdraw(asset_id, who, converted_fee);
+		let can_withdraw = <T::Fungibles as Inspect<T::AccountId>>::can_withdraw(
+			asset_id.clone(),
+			who,
+			converted_fee,
+		);
 		if can_withdraw != WithdrawConsequence::Success {
 			return Err(InvalidTransaction::Payment.into())
 		}
@@ -167,7 +170,7 @@ where
 		// less than one (e.g. 0.5) but gets rounded down by integer division we introduce a minimum
 		// fee.
 		let min_converted_fee = if fee.is_zero() { Zero::zero() } else { One::one() };
-		let converted_fee = CON::to_asset_balance(fee, asset_id)
+		let converted_fee = CON::to_asset_balance(fee, asset_id.clone())
 			.map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))?
 			.max(min_converted_fee);
 		let can_withdraw =
-- 
GitLab


From 77ad8abb4a3aada3362fc4d5780db1844cc2e15d Mon Sep 17 00:00:00 2001
From: Javier Viola <363911+pepoviola@users.noreply.github.com>
Date: Thu, 16 Jan 2025 13:09:24 -0300
Subject: [PATCH 070/116] Migrate substrate zombienet test poc (#7178)

Zombienet substrate tests PoC (using native provider).

cc: @emamihe @alvicsam
---
 .github/workflows/build-publish-images.yml    |  47 +++---
 .../zombienet-reusable-preflight.yml          | 145 ++++++++++++++++++
 .github/workflows/zombienet_substrate.yml     |  45 ++++++
 .github/zombienet-env                         |   9 ++
 4 files changed, 223 insertions(+), 23 deletions(-)
 create mode 100644 .github/workflows/zombienet-reusable-preflight.yml
 create mode 100644 .github/workflows/zombienet_substrate.yml
 create mode 100644 .github/zombienet-env

diff --git a/.github/workflows/build-publish-images.yml b/.github/workflows/build-publish-images.yml
index 874b5d37469..deb3b3df5ff 100644
--- a/.github/workflows/build-publish-images.yml
+++ b/.github/workflows/build-publish-images.yml
@@ -53,7 +53,7 @@ jobs:
       - name: pack artifacts
         run: |
           mkdir -p ./artifacts
-          VERSION="${{ needs.preflight.outputs.SOURCE_REF_NAME }}" # will be tag or branch name
+          VERSION="${{ needs.preflight.outputs.SOURCE_REF_SLUG }}" # will be tag or branch name
           mv ./target/testnet/polkadot ./artifacts/.
           mv ./target/testnet/polkadot-prepare-worker ./artifacts/.
           mv ./target/testnet/polkadot-execute-worker ./artifacts/.
@@ -62,7 +62,7 @@ jobs:
           sha256sum polkadot | tee polkadot.sha256
           shasum -c polkadot.sha256
           cd ../
-          EXTRATAG="${{ needs.preflight.outputs.SOURCE_REF_NAME }}-${COMMIT_SHA}"
+          EXTRATAG="${{ needs.preflight.outputs.SOURCE_REF_SLUG }}-${COMMIT_SHA}"
           echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
           echo -n ${VERSION} > ./artifacts/VERSION
           echo -n ${EXTRATAG} > ./artifacts/EXTRATAG
@@ -77,7 +77,7 @@ jobs:
       - name: upload artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
           path: artifacts.tar
           retention-days: 1
 
@@ -103,7 +103,7 @@ jobs:
           mkdir -p ./artifacts
           mv ./target/release/polkadot-parachain ./artifacts/.
           echo "___The VERSION is either a tag name or the curent branch if triggered not by a tag___"
-          echo ${{ needs.preflight.outputs.SOURCE_REF_NAME }} | tee ./artifacts/VERSION
+          echo ${{ needs.preflight.outputs.SOURCE_REF_SLUG }} | tee ./artifacts/VERSION
 
       - name: tar
         run: tar -cvf artifacts.tar artifacts
@@ -111,7 +111,7 @@ jobs:
       - name: upload artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
           path: artifacts.tar
           retention-days: 1
 
@@ -147,7 +147,7 @@ jobs:
       - name: upload artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
           path: artifacts.tar
           retention-days: 1
 
@@ -172,8 +172,8 @@ jobs:
           mkdir -p ./artifacts
           mv ./target/testnet/adder-collator ./artifacts/.
           mv ./target/testnet/undying-collator ./artifacts/.
-          echo -n "${{ needs.preflight.outputs.SOURCE_REF_NAME }}" > ./artifacts/VERSION
-          echo -n "${{ needs.preflight.outputs.SOURCE_REF_NAME }}-${COMMIT_SHA}" > ./artifacts/EXTRATAG
+          echo -n "${{ needs.preflight.outputs.SOURCE_REF_SLUG }}" > ./artifacts/VERSION
+          echo -n "${{ needs.preflight.outputs.SOURCE_REF_SLUG }}-${COMMIT_SHA}" > ./artifacts/EXTRATAG
           echo "adder-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))"
           echo "undying-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))"
           cp -r ./docker/* ./artifacts
@@ -184,7 +184,7 @@ jobs:
       - name: upload artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
           path: artifacts.tar
           retention-days: 1
 
@@ -209,8 +209,8 @@ jobs:
           mv ./target/testnet/malus ./artifacts/.
           mv ./target/testnet/polkadot-execute-worker ./artifacts/.
           mv ./target/testnet/polkadot-prepare-worker ./artifacts/.
-          echo -n "${{ needs.preflight.outputs.SOURCE_REF_NAME }}" > ./artifacts/VERSION
-          echo -n "${{ needs.preflight.outputs.SOURCE_REF_NAME }}-${COMMIT_SHA}" > ./artifacts/EXTRATAG
+          echo -n "${{ needs.preflight.outputs.SOURCE_REF_SLUG }}" > ./artifacts/VERSION
+          echo -n "${{ needs.preflight.outputs.SOURCE_REF_SLUG }}-${COMMIT_SHA}" > ./artifacts/EXTRATAG
           echo "polkadot-test-malus = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))"
           cp -r ./docker/* ./artifacts
 
@@ -220,7 +220,7 @@ jobs:
       - name: upload artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
           path: artifacts.tar
           retention-days: 1
 
@@ -246,6 +246,7 @@ jobs:
           WASM_BUILD_NO_COLOR=1 forklift cargo build --locked --release -p staging-node-cli
           ls -la target/release/
       - name: pack artifacts
+        shell: bash
         run: |
           mv target/release/substrate-node ./artifacts/substrate/substrate
           echo -n "Substrate version = "
@@ -264,7 +265,7 @@ jobs:
       - name: upload artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
           path: artifacts.tar
           retention-days: 1
 
@@ -294,7 +295,7 @@ jobs:
       - name: upload artifacts
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: ${{ github.job }}-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
           path: artifacts.tar
           retention-days: 1
 
@@ -313,7 +314,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: build-test-parachain-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: build-test-parachain-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
 
       - name: tar
         run: tar -xvf artifacts.tar
@@ -337,7 +338,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: build-linux-stable-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: build-linux-stable-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
 
       - name: tar
         run: tar -xvf artifacts.tar
@@ -361,7 +362,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: build-test-collators-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: build-test-collators-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
 
       - name: tar
         run: tar -xvf artifacts.tar
@@ -385,7 +386,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: build-malus-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: build-malus-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
 
       - name: tar
         run: tar -xvf artifacts.tar
@@ -409,7 +410,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: build-linux-substrate-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: build-linux-substrate-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
 
       - name: tar
         run: tar -xvf artifacts.tar
@@ -441,7 +442,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: build-linux-stable-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: build-linux-stable-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
       - name: tar
         run: |
           tar -xvf artifacts.tar
@@ -449,7 +450,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: build-linux-stable-cumulus-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: build-linux-stable-cumulus-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
       - name: tar
         run: |
           tar -xvf artifacts.tar
@@ -457,7 +458,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: prepare-bridges-zombienet-artifacts-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: prepare-bridges-zombienet-artifacts-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
       - name: tar
         run: |
           tar -xvf artifacts.tar
@@ -482,7 +483,7 @@ jobs:
 
       - uses: actions/download-artifact@v4.1.8
         with:
-          name: build-linux-stable-cumulus-${{ needs.preflight.outputs.SOURCE_REF_NAME }}
+          name: build-linux-stable-cumulus-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
 
       - name: tar
         run: tar -xvf artifacts.tar
diff --git a/.github/workflows/zombienet-reusable-preflight.yml b/.github/workflows/zombienet-reusable-preflight.yml
new file mode 100644
index 00000000000..8e938567d81
--- /dev/null
+++ b/.github/workflows/zombienet-reusable-preflight.yml
@@ -0,0 +1,145 @@
+# Reusable workflow to set various useful variables
+# and to perform checks and generate conditions for other workflows.
+# Currently it checks if any Rust (build-related) file is changed
+# and if the current (caller) workflow file is changed.
+# Example:
+#
+# jobs:
+#   preflight:
+#     uses: ./.github/workflows/zombienet-reusable-preflight.yml
+#   some-job:
+#     needs: preflight
+#     if: ${{ needs.preflight.outputs.changes_substrate }}
+#  .......
+
+name: Zombienet Preflight
+
+on:
+  workflow_call:
+    # Map the workflow outputs to job outputs
+    outputs:
+      changes_substrate:
+        value: ${{ jobs.preflight.outputs.changes_substrate }}
+
+      ZOMBIENET_IMAGE:
+        value: ${{ jobs.preflight.outputs.ZOMBIENET_IMAGE }}
+        description: "ZOMBIENET CI image"
+
+      ZOMBIENET_RUNNER:
+        value: ${{ jobs.preflight.outputs.ZOMBIENET_RUNNER }}
+        description: |
+          Main runner for zombienet tests.
+
+      DOCKER_IMAGES_VERSION:
+        value: ${{ jobs.preflight.outputs.DOCKER_IMAGES_VERSION }}
+        description: |
+          Version for temp docker images.
+
+      # Global vars (from global preflight)
+      SOURCE_REF_SLUG:
+        value: ${{ jobs.global_preflight.outputs.SOURCE_REF_SLUG }}
+
+      # Zombie vars
+      PUSHGATEWAY_URL:
+        value: ${{ jobs.preflight.outputs.PUSHGATEWAY_URL }}
+        description: "Gateway (url) to push metrics related to test."
+      DEBUG:
+        value: ${{ jobs.preflight.outputs.DEBUG }}
+        description: "Debug value to zombienet v1 tests."
+      ZOMBIE_PROVIDER:
+        value: ${{ jobs.preflight.outputs.ZOMBIE_PROVIDER }}
+        description: "Provider to use in zombienet-sdk tests."
+      RUST_LOG:
+        value: ${{ jobs.preflight.outputs.RUST_LOG }}
+        description: "Log value to use in zombienet-sdk tests."
+      RUN_IN_CI:
+        value: ${{ jobs.preflight.outputs.RUN_IN_CI }}
+        description: "Internal flag to make zombienet aware of the env."
+
+      KUBERNETES_CPU_REQUEST:
+        value: ${{ jobs.preflight.outputs.KUBERNETES_CPU_REQUEST }}
+        description: "Base cpu (request) for pod runner."
+
+      KUBERNETES_MEMORY_REQUEST:
+        value: ${{ jobs.preflight.outputs.KUBERNETES_MEMORY_REQUEST }}
+        description: "Base memory (request) for pod runner."
+
+jobs:
+  global_preflight:
+    uses: ./.github/workflows/reusable-preflight.yml
+
+  #
+  #
+  #
+  preflight:
+    runs-on: ubuntu-latest
+    outputs:
+      changes_substrate: ${{ steps.set_changes.outputs.substrate_any_changed || steps.set_changes.outputs.currentWorkflow_any_changed }}
+
+      ZOMBIENET_IMAGE: ${{ steps.set_vars.outputs.ZOMBIENET_IMAGE }}
+      ZOMBIENET_RUNNER: ${{ steps.set_vars.outputs.ZOMBIENET_RUNNER }}
+
+      DOCKER_IMAGES_VERSION: ${{ steps.set_images_version.outputs.DOCKER_IMAGES_VERSION }}
+
+      # common vars
+      PUSHGATEWAY_URL: ${{ steps.set_vars.outputs.PUSHGATEWAY_URL }}
+      DEBUG: ${{ steps.set_vars.outputs.DEBUG }}
+      ZOMBIE_PROVIDER: ${{ steps.set_vars.outputs.ZOMBIE_PROVIDER }}
+      RUST_LOG: ${{ steps.set_vars.outputs.RUST_LOG }}
+      RUN_IN_CI: ${{ steps.set_vars.outputs.RUN_IN_CI }}
+      KUBERNETES_CPU_REQUEST: ${{ steps.set_vars.outputs.KUBERNETES_CPU_REQUEST }}
+      KUBERNETES_MEMORY_REQUEST: ${{ steps.set_vars.outputs.KUBERNETES_MEMORY_REQUEST }}
+
+    steps:
+
+      - uses: actions/checkout@v4
+
+      #
+      # Set changes
+      #
+      - name: Current file
+        id: current_file
+        shell: bash
+        run: |
+          echo "currentWorkflowFile=$(echo ${{ github.workflow_ref }} | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p")" >> $GITHUB_OUTPUT
+          echo "currentActionDir=$(echo ${{ github.action_path }} | sed -nE "s/.*(\.github\/actions\/[a-zA-Z0-9_-]*)/\1/p")" >> $GITHUB_OUTPUT
+
+      - name: Set changes
+        id: set_changes
+        uses: tj-actions/changed-files@v45
+        with:
+          files_yaml: |
+            substrate:
+              - 'substrate/**/*'
+            currentWorkflow:
+              - '${{ steps.current_file.outputs.currentWorkflowFile }}'
+              - '.github/workflows/zombienet-reusable-preflight.yml'
+              - '.github/zombienet-env'
+
+
+      #
+      # Set environment vars (including runner/image)
+      #
+      - name: Set vars
+        id: set_vars
+        shell: bash
+        run: cat .github/zombienet-env >> $GITHUB_OUTPUT
+
+
+      #
+      #
+      #
+      - name: Set docker images version
+        id: set_images_version
+        shell: bash
+        run: |
+          export BRANCH_NAME=${{ github.head_ref || github.ref_name }}
+          export DOCKER_IMAGES_VERSION=${BRANCH_NAME/\//-}
+          if [[ ${{ github.event_name }} == "merge_group" ]]; then export DOCKER_IMAGES_VERSION="${GITHUB_SHA::8}"; fi
+          echo "DOCKER_IMAGES_VERSION=${DOCKER_IMAGES_VERSION}" >> $GITHUB_OUTPUT
+
+      - name: log
+        shell: bash
+        run: |
+          echo "workflow file: ${{ steps.current_file.outputs.currentWorkflowFile }}"
+          echo "Modified: ${{ steps.set_changes.outputs.modified_keys }}"
\ No newline at end of file
diff --git a/.github/workflows/zombienet_substrate.yml b/.github/workflows/zombienet_substrate.yml
new file mode 100644
index 00000000000..823679d67d5
--- /dev/null
+++ b/.github/workflows/zombienet_substrate.yml
@@ -0,0 +1,45 @@
+name: Zombienet Substrate
+
+on:
+  workflow_run:
+    workflows: [Build and push images]
+    types: [completed]
+  merge_group:
+  workflow_dispatch:
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  preflight:
+    uses: ./.github/workflows/zombienet-reusable-preflight.yml
+
+  zombienet-substrate-0000-block-building:
+    needs: [preflight]
+    # only run if we have changes in ./substrate directory and the build workflow already finish with success status.
+    if: ${{ needs.preflight.outputs.changes_substrate == 'true' && github.event.workflow_run.conclusion == 'success' }}
+    runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }}
+    timeout-minutes: 60
+    container:
+      image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }}
+    env:
+      FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1
+      LOCAL_DIR: "./substrate/zombienet"
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - uses: actions/download-artifact@v4.1.8
+        with:
+          name: build-linux-substrate-${{ needs.preflight.outputs.SOURCE_REF_SLUG }}
+
+      - name: script
+        run: |
+          DEBUG=${{ needs.preflight.outputs.DEBUG }} zombie -p native ${LOCAL_DIR}/0000-block-building/block-building.zndsl
+
+      - name: upload logs
+        uses: actions/upload-artifact@v4
+        with:
+          name: zombienet-logs-scale-net
+          path: |
+            /tmp/zombie*/logs/*
diff --git a/.github/zombienet-env b/.github/zombienet-env
new file mode 100644
index 00000000000..e6da1a49c4b
--- /dev/null
+++ b/.github/zombienet-env
@@ -0,0 +1,9 @@
+    ZOMBIENET_IMAGE="docker.io/paritytech/zombienet:v1.3.116"
+    ZOMBIENET_RUNNER="zombienet-arc-runner"
+    PUSHGATEWAY_URL="http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics"
+    DEBUG="zombie,zombie::network-node,zombie::kube::client::logs"
+    ZOMBIE_PROVIDER="k8s"
+    RUST_LOG="info,zombienet_orchestrator=debug"
+    RUN_IN_CI="1"
+    KUBERNETES_CPU_REQUEST="512m"
+    KUBERNETES_MEMORY_REQUEST="1Gi"
-- 
GitLab


From e056586b96f2bf8d2f0abe44bd91184d2dfd8bf3 Mon Sep 17 00:00:00 2001
From: chloefeal <188809157+chloefeal@users.noreply.github.com>
Date: Fri, 17 Jan 2025 03:33:59 +0800
Subject: [PATCH 071/116] chore: fix typos (#6999)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

✄
-----------------------------------------------------------------------------

Thank you for your Pull Request! 🙏 Please make sure it follows the
contribution guidelines outlined in [this

document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md)
and fill out the
sections below. Once you're ready to submit your PR for review, please
delete this section and leave only the text under
the "Description" heading.

# Description

Hello, I fix some typos in logs and comments. Thank you very much.


## Integration

*In depth notes about how this PR should be integrated by downstream
projects. This part is mandatory, and should be
reviewed by reviewers, if the PR does NOT have the `R0-Silent` label. In
case of a `R0-Silent`, it can be ignored.*

## Review Notes

*In depth notes about the **implementation** details of your PR. This
should be the main guide for reviewers to
understand your approach and effectively review it. If too long, use

[`<details>`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/details)*.

*Imagine that someone who is depending on the old code wants to
integrate your new code and the only information that
they get is this section. It helps to include example usage and default
value here, with a `diff` code-block to show
possibly integration.*

*Include your leftover TODOs, if any, here.*

# Checklist

* [ ] My PR includes a detailed description as outlined in the
"Description" and its two subsections above.
* [ ] My PR follows the [labeling requirements](

https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process
) of this project (at minimum one label for `T` required)
* External contributors: ask maintainers to put the right label on your
PR.
* [ ] I have made corresponding changes to the documentation (if
applicable)
* [ ] I have added tests that prove my fix is effective or that my
feature works (if applicable)

You can remove the "Checklist" section once all have been checked. Thank
you for your contribution!

✄
-----------------------------------------------------------------------------

Signed-off-by: chloefeal <188809157+chloefeal@users.noreply.github.com>
---
 bridges/relays/lib-substrate-relay/src/error.rs    | 2 +-
 bridges/relays/messages/src/message_lane_loop.rs   | 2 +-
 bridges/snowbridge/primitives/core/src/location.rs | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/bridges/relays/lib-substrate-relay/src/error.rs b/bridges/relays/lib-substrate-relay/src/error.rs
index 2ebd9130f39..3a62f30838c 100644
--- a/bridges/relays/lib-substrate-relay/src/error.rs
+++ b/bridges/relays/lib-substrate-relay/src/error.rs
@@ -47,7 +47,7 @@ pub enum Error<Hash: Debug + MaybeDisplay, HeaderNumber: Debug + MaybeDisplay> {
 	#[error("Failed to guess initial {0} GRANDPA authorities set id: checked all possible ids in range [0; {1}]")]
 	GuessInitialAuthorities(&'static str, HeaderNumber),
 	/// Failed to retrieve GRANDPA authorities at the given header from the source chain.
-	#[error("Failed to retrive {0} GRANDPA authorities set at header {1}: {2:?}")]
+	#[error("Failed to retrieve {0} GRANDPA authorities set at header {1}: {2:?}")]
 	RetrieveAuthorities(&'static str, Hash, client::Error),
 	/// Failed to decode GRANDPA authorities at the given header of the source chain.
 	#[error("Failed to decode {0} GRANDPA authorities set at header {1}: {2:?}")]
diff --git a/bridges/relays/messages/src/message_lane_loop.rs b/bridges/relays/messages/src/message_lane_loop.rs
index 36de637f04c..cdc94b9fae4 100644
--- a/bridges/relays/messages/src/message_lane_loop.rs
+++ b/bridges/relays/messages/src/message_lane_loop.rs
@@ -1041,7 +1041,7 @@ pub(crate) mod tests {
 	#[test]
 	fn message_lane_loop_is_able_to_recover_from_unsuccessful_transaction() {
 		// with this configuration, both source and target clients will mine their transactions, but
-		// their corresponding nonce won't be udpated => reconnect will happen
+		// their corresponding nonce won't be updated => reconnect will happen
 		let (exit_sender, exit_receiver) = unbounded();
 		let result = run_loop_test(
 			Arc::new(Mutex::new(TestClientData {
diff --git a/bridges/snowbridge/primitives/core/src/location.rs b/bridges/snowbridge/primitives/core/src/location.rs
index f49a245c412..eb5ac66d46d 100644
--- a/bridges/snowbridge/primitives/core/src/location.rs
+++ b/bridges/snowbridge/primitives/core/src/location.rs
@@ -206,7 +206,7 @@ mod tests {
 		for token in token_locations {
 			assert!(
 				TokenIdOf::convert_location(&token).is_some(),
-				"Valid token = {token:?} yeilds no TokenId."
+				"Valid token = {token:?} yields no TokenId."
 			);
 		}
 
@@ -220,7 +220,7 @@ mod tests {
 		for token in non_token_locations {
 			assert!(
 				TokenIdOf::convert_location(&token).is_none(),
-				"Invalid token = {token:?} yeilds a TokenId."
+				"Invalid token = {token:?} yields a TokenId."
 			);
 		}
 	}
-- 
GitLab


From f5673cf260ba08fbc04667b1fcd0e635d87e6451 Mon Sep 17 00:00:00 2001
From: Ankan <10196091+Ank4n@users.noreply.github.com>
Date: Thu, 16 Jan 2025 20:32:53 +0100
Subject: [PATCH 072/116] [Staking] Currency <> Fungible migration (#5501)

Migrate staking currency from `traits::LockableCurrency` to
`traits::fungible::holds`.

Resolves part of https://github.com/paritytech/polkadot-sdk/issues/226.

## Changes
### Nomination Pool
TransferStake is now incompatible with the fungible migration, as old pools
were not meant to have additional ED. Since they are deprecated anyway,
its usage has been removed from all test runtimes.

### Staking
- Config: `Currency` becomes of type `Fungible` while `OldCurrency` is
the `LockableCurrency` used before.
- Lazy migration of accounts. Any ledger update will create a new hold
with no extra reads/writes. A permissionless extrinsic
`migrate_currency()` releases the old `lock` along with some
housekeeping.
- Staking now requires ED to be left free. It also adds no consumer to
staking accounts.
- If hold cannot be applied to all stake, the un-holdable part is force
withdrawn from the ledger.

### Delegated Staking
The pallet no longer adds a provider for agent accounts.

## Migration stats
### Polkadot
Total accounts that can be migrated: 59564
Accounts failing to migrate: 0
Accounts with stake force withdrawn greater than ED: 59
Total force withdrawal: 29591.26 DOT

### Kusama
Total accounts that can be migrated: 26311
Accounts failing to migrate: 0
Accounts with stake force withdrawn greater than ED: 48
Total force withdrawal: 1036.05 KSM


[Full logs here](https://hackmd.io/@ak0n/BklDuFra0).

## Note about locks (freeze) vs holds
With locks or freezes, staking could use total balance of an account.
But with holds, the account needs to be left with at least Existential
Deposit in free balance. This would also affect nomination pools which
till now has been able to stake all funds contributed to it. An
alternate version of this PR is
https://github.com/paritytech/polkadot-sdk/pull/5658, where the staking
pallet does not add any provider, but that means the pools and
delegated-staking pallets have to provide for these accounts, making the
end-to-end logic (of provider and consumer refs) a lot less intuitive
and more prone to bugs.

This PR now introduces requirement for stakers to maintain ED in their
free balance. This helps with removing the bug prone incrementing and
decrementing of consumers and providers.

## TODO
- [x] Test: Vesting + governance locked funds can be staked.
- [ ] can `Call::restore_ledger` be removed? @gpestana
- [x] Ensure unclaimed withdrawals is not affected by no provider for
pool accounts.
- [x] Investigate kusama accounts with balance between 0 and ED.
- [x] Permissionless call to release lock.
- [x] Migration of consumer (dec) and provider (inc) for direct stakers.
- [x] force unstake if hold cannot be applied to all stake.
- [x] Fix try state checks (it thinks nothing is staked for unmigrated
ledgers).
- [x] Bench `migrate_currency`.
- [x] Virtual Staker migration test.
- [x] Ensure total issuance is up to date when minting rewards.

## Followup
- https://github.com/paritytech/polkadot-sdk/issues/5742

---------

Co-authored-by: command-bot <>
---
 Cargo.lock                                    |  24 +-
 Cargo.toml                                    |   1 -
 polkadot/runtime/test-runtime/src/lib.rs      |   2 +
 polkadot/runtime/westend/src/lib.rs           |   2 +
 polkadot/runtime/westend/src/tests.rs         |  99 +-
 .../westend/src/weights/pallet_staking.rs     | 386 ++++----
 prdoc/pr_5501.prdoc                           |  47 +
 substrate/bin/node/runtime/src/lib.rs         |  33 +-
 substrate/bin/node/testing/src/genesis.rs     |   4 +-
 substrate/frame/babe/src/mock.rs              |   1 +
 substrate/frame/beefy/src/mock.rs             |   1 +
 substrate/frame/delegated-staking/src/lib.rs  |   2 +-
 substrate/frame/delegated-staking/src/mock.rs |   1 +
 .../frame/delegated-staking/src/tests.rs      |  11 +-
 .../frame/delegated-staking/src/types.rs      |   6 -
 .../test-staking-e2e/Cargo.toml               |   2 +
 .../test-staking-e2e/src/lib.rs               |  32 +-
 .../test-staking-e2e/src/mock.rs              |  39 +-
 substrate/frame/fast-unstake/src/mock.rs      |   6 +-
 substrate/frame/fast-unstake/src/tests.rs     |  16 +-
 substrate/frame/grandpa/src/mock.rs           |   1 +
 .../benchmarking/src/inner.rs                 |  75 +-
 .../nomination-pools/benchmarking/src/mock.rs |   1 +
 .../frame/nomination-pools/src/adapter.rs     |   6 +-
 substrate/frame/nomination-pools/src/mock.rs  | 202 +++-
 substrate/frame/nomination-pools/src/tests.rs | 326 ++-----
 .../test-delegate-stake/src/lib.rs            |  22 +-
 .../test-delegate-stake/src/mock.rs           |   4 +
 .../test-transfer-stake/Cargo.toml            |  39 -
 .../test-transfer-stake/src/lib.rs            | 912 ------------------
 .../test-transfer-stake/src/mock.rs           | 231 -----
 .../frame/offences/benchmarking/src/inner.rs  |  10 +-
 .../frame/offences/benchmarking/src/mock.rs   |   1 +
 substrate/frame/root-offences/src/mock.rs     |   9 +-
 .../frame/session/benchmarking/src/mock.rs    |   1 +
 substrate/frame/staking/Cargo.toml            |   1 +
 substrate/frame/staking/src/asset.rs          |  88 +-
 substrate/frame/staking/src/benchmarking.rs   |  23 +-
 substrate/frame/staking/src/ledger.rs         |  10 +-
 substrate/frame/staking/src/lib.rs            |  12 +-
 substrate/frame/staking/src/mock.rs           |  47 +-
 substrate/frame/staking/src/pallet/impls.rs   | 121 ++-
 substrate/frame/staking/src/pallet/mod.rs     |  80 +-
 substrate/frame/staking/src/testing_utils.rs  |  18 +
 substrate/frame/staking/src/tests.rs          | 854 +++++++++++-----
 substrate/frame/staking/src/weights.rs        | 755 ++++++++-------
 substrate/primitives/staking/src/lib.rs       |   2 +-
 47 files changed, 2100 insertions(+), 2466 deletions(-)
 create mode 100644 prdoc/pr_5501.prdoc
 delete mode 100644 substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
 delete mode 100644 substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs
 delete mode 100644 substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs

diff --git a/Cargo.lock b/Cargo.lock
index 6eba7e65109..42ed88fb0d0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13369,6 +13369,7 @@ dependencies = [
  "log",
  "pallet-bags-list 27.0.0",
  "pallet-balances 28.0.0",
+ "pallet-delegated-staking 1.0.0",
  "pallet-election-provider-multi-phase 27.0.0",
  "pallet-nomination-pools 25.0.0",
  "pallet-session 28.0.0",
@@ -14459,29 +14460,6 @@ dependencies = [
  "sp-tracing 16.0.0",
 ]
 
-[[package]]
-name = "pallet-nomination-pools-test-transfer-stake"
-version = "1.0.0"
-dependencies = [
- "frame-election-provider-support 28.0.0",
- "frame-support 28.0.0",
- "frame-system 28.0.0",
- "log",
- "pallet-bags-list 27.0.0",
- "pallet-balances 28.0.0",
- "pallet-nomination-pools 25.0.0",
- "pallet-staking 28.0.0",
- "pallet-staking-reward-curve",
- "pallet-timestamp 27.0.0",
- "parity-scale-codec",
- "scale-info",
- "sp-core 28.0.0",
- "sp-io 30.0.0",
- "sp-runtime 31.0.1",
- "sp-staking 26.0.0",
- "sp-tracing 16.0.0",
-]
-
 [[package]]
 name = "pallet-offences"
 version = "27.0.0"
diff --git a/Cargo.toml b/Cargo.toml
index 509775fe99e..e17f08148b1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -389,7 +389,6 @@ members = [
 	"substrate/frame/nomination-pools/fuzzer",
 	"substrate/frame/nomination-pools/runtime-api",
 	"substrate/frame/nomination-pools/test-delegate-stake",
-	"substrate/frame/nomination-pools/test-transfer-stake",
 	"substrate/frame/offences",
 	"substrate/frame/offences/benchmarking",
 	"substrate/frame/paged-list",
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index 4f9ba8d8508..cdf6fa92da2 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -366,11 +366,13 @@ impl onchain::Config for OnChainSeqPhragmen {
 const MAX_QUOTA_NOMINATIONS: u32 = 16;
 
 impl pallet_staking::Config for Runtime {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = Balance;
 	type UnixTime = Timestamp;
 	type CurrencyToVote = polkadot_runtime_common::CurrencyToVote;
 	type RewardRemainder = ();
+	type RuntimeHoldReason = RuntimeHoldReason;
 	type RuntimeEvent = RuntimeEvent;
 	type Slash = ();
 	type Reward = ();
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 58d2bdcb7c7..8a5771fe7cc 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -728,8 +728,10 @@ parameter_types! {
 }
 
 impl pallet_staking::Config for Runtime {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = Balance;
+	type RuntimeHoldReason = RuntimeHoldReason;
 	type UnixTime = Timestamp;
 	type CurrencyToVote = CurrencyToVote;
 	type RewardRemainder = ();
diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs
index fcdaf7ff2de..65b81cc00f0 100644
--- a/polkadot/runtime/westend/src/tests.rs
+++ b/polkadot/runtime/westend/src/tests.rs
@@ -155,25 +155,27 @@ mod remote_tests {
 
 		let transport: Transport = var("WS").unwrap_or("ws://127.0.0.1:9900".to_string()).into();
 		let maybe_state_snapshot: Option<SnapshotConfig> = var("SNAP").map(|s| s.into()).ok();
+		let online_config = OnlineConfig {
+			transport,
+			state_snapshot: maybe_state_snapshot.clone(),
+			child_trie: false,
+			pallets: vec![
+				"Staking".into(),
+				"System".into(),
+				"Balances".into(),
+				"NominationPools".into(),
+				"DelegatedStaking".into(),
+			],
+			..Default::default()
+		};
 		let mut ext = Builder::<Block>::default()
 			.mode(if let Some(state_snapshot) = maybe_state_snapshot {
 				Mode::OfflineOrElseOnline(
 					OfflineConfig { state_snapshot: state_snapshot.clone() },
-					OnlineConfig {
-						transport,
-						state_snapshot: Some(state_snapshot),
-						pallets: vec![
-							"staking".into(),
-							"system".into(),
-							"balances".into(),
-							"nomination-pools".into(),
-							"delegated-staking".into(),
-						],
-						..Default::default()
-					},
+					online_config,
 				)
 			} else {
-				Mode::Online(OnlineConfig { transport, ..Default::default() })
+				Mode::Online(online_config)
 			})
 			.build()
 			.await
@@ -241,6 +243,77 @@ mod remote_tests {
 			);
 		});
 	}
+
+	#[tokio::test]
+	async fn staking_curr_fun_migrate() {
+		// Intended to be run only manually.
+		if var("RUN_MIGRATION_TESTS").is_err() {
+			return;
+		}
+		sp_tracing::try_init_simple();
+
+		let transport: Transport = var("WS").unwrap_or("ws://127.0.0.1:9944".to_string()).into();
+		let maybe_state_snapshot: Option<SnapshotConfig> = var("SNAP").map(|s| s.into()).ok();
+		let online_config = OnlineConfig {
+			transport,
+			state_snapshot: maybe_state_snapshot.clone(),
+			child_trie: false,
+			pallets: vec!["Staking".into(), "System".into(), "Balances".into()],
+			..Default::default()
+		};
+		let mut ext = Builder::<Block>::default()
+			.mode(if let Some(state_snapshot) = maybe_state_snapshot {
+				Mode::OfflineOrElseOnline(
+					OfflineConfig { state_snapshot: state_snapshot.clone() },
+					online_config,
+				)
+			} else {
+				Mode::Online(online_config)
+			})
+			.build()
+			.await
+			.unwrap();
+		ext.execute_with(|| {
+			// create an account with some balance
+			let alice = AccountId::from([1u8; 32]);
+			use frame_support::traits::Currency;
+			let _ = Balances::deposit_creating(&alice, 100_000 * UNITS);
+
+			let mut success = 0;
+			let mut err = 0;
+			let mut force_withdraw_acc = 0;
+			// iterate over all pools
+			pallet_staking::Ledger::<Runtime>::iter().for_each(|(ctrl, ledger)| {
+				match pallet_staking::Pallet::<Runtime>::migrate_currency(
+					RuntimeOrigin::signed(alice.clone()).into(),
+					ledger.stash.clone(),
+				) {
+					Ok(_) => {
+						let updated_ledger =
+							pallet_staking::Ledger::<Runtime>::get(&ctrl).expect("ledger exists");
+						let force_withdraw = ledger.total - updated_ledger.total;
+						if force_withdraw > 0 {
+							force_withdraw_acc += force_withdraw;
+							log::info!(target: "remote_test", "Force withdraw from stash {:?}: value {:?}", ledger.stash, force_withdraw);
+						}
+						success += 1;
+					},
+					Err(e) => {
+						log::error!(target: "remote_test", "Error migrating {:?}: {:?}", ledger.stash, e);
+						err += 1;
+					},
+				}
+			});
+
+			log::info!(
+				target: "remote_test",
+				"Migration stats: success: {}, err: {}, total force withdrawn stake: {}",
+				success,
+				err,
+				force_withdraw_acc
+			);
+		});
+	}
 }
 
 #[test]
diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs
index 393fa0b3717..f1e7f5ba157 100644
--- a/polkadot/runtime/westend/src/weights/pallet_staking.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_staking`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-03-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-09-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-obbyq9g6-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -52,19 +52,19 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Payee` (r:0 w:1)
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn bond() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1009`
-		//  Estimated: `4764`
-		// Minimum execution time: 40_585_000 picoseconds.
-		Weight::from_parts(41_800_000, 0)
-			.saturating_add(Weight::from_parts(0, 4764))
+		//  Measured:  `1035`
+		//  Estimated: `4556`
+		// Minimum execution time: 70_147_000 picoseconds.
+		Weight::from_parts(71_795_000, 0)
+			.saturating_add(Weight::from_parts(0, 4556))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -72,20 +72,20 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	fn bond_extra() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1921`
+		//  Measured:  `1947`
 		//  Estimated: `8877`
-		// Minimum execution time: 81_809_000 picoseconds.
-		Weight::from_parts(84_387_000, 0)
+		// Minimum execution time: 125_203_000 picoseconds.
+		Weight::from_parts(128_088_000, 0)
 			.saturating_add(Weight::from_parts(0, 8877))
 			.saturating_add(T::DbWeight::get().reads(9))
 			.saturating_add(T::DbWeight::get().writes(7))
@@ -100,23 +100,23 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::CurrentEra` (r:1 w:0)
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	fn unbond() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2128`
+		//  Measured:  `2051`
 		//  Estimated: `8877`
-		// Minimum execution time: 89_419_000 picoseconds.
-		Weight::from_parts(91_237_000, 0)
+		// Minimum execution time: 101_991_000 picoseconds.
+		Weight::from_parts(104_567_000, 0)
 			.saturating_add(Weight::from_parts(0, 8877))
 			.saturating_add(T::DbWeight::get().reads(12))
-			.saturating_add(T::DbWeight::get().writes(7))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
@@ -124,23 +124,25 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::CurrentEra` (r:1 w:0)
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0)
 	/// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
+	/// Storage: `DelegatedStaking::Agents` (r:1 w:0)
+	/// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 100]`.
 	fn withdraw_unbonded_update(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1223`
-		//  Estimated: `4764`
-		// Minimum execution time: 45_152_000 picoseconds.
-		Weight::from_parts(46_460_819, 0)
-			.saturating_add(Weight::from_parts(0, 4764))
-			// Standard Error: 972
-			.saturating_add(Weight::from_parts(55_473, 0).saturating_mul(s.into()))
-			.saturating_add(T::DbWeight::get().reads(6))
+		//  Measured:  `1253`
+		//  Estimated: `4556`
+		// Minimum execution time: 76_450_000 picoseconds.
+		Weight::from_parts(78_836_594, 0)
+			.saturating_add(Weight::from_parts(0, 4556))
+			// Standard Error: 1_529
+			.saturating_add(Weight::from_parts(66_662, 0).saturating_mul(s.into()))
+			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Staking::Ledger` (r:1 w:1)
@@ -151,10 +153,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
 	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Nominators` (r:1 w:1)
@@ -174,15 +176,15 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `s` is `[0, 100]`.
 	fn withdraw_unbonded_kill(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2127 + s * (4 ±0)`
+		//  Measured:  `2153 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 82_762_000 picoseconds.
-		Weight::from_parts(91_035_077, 0)
+		// Minimum execution time: 121_962_000 picoseconds.
+		Weight::from_parts(131_000_151, 0)
 			.saturating_add(Weight::from_parts(0, 6248))
-			// Standard Error: 3_771
-			.saturating_add(Weight::from_parts(1_217_871, 0).saturating_mul(s.into()))
+			// Standard Error: 3_846
+			.saturating_add(Weight::from_parts(1_277_843, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(13))
-			.saturating_add(T::DbWeight::get().writes(11))
+			.saturating_add(T::DbWeight::get().writes(12))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -210,10 +212,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn validate() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1301`
+		//  Measured:  `1334`
 		//  Estimated: `4556`
-		// Minimum execution time: 50_555_000 picoseconds.
-		Weight::from_parts(52_052_000, 0)
+		// Minimum execution time: 66_450_000 picoseconds.
+		Weight::from_parts(68_302_000, 0)
 			.saturating_add(Weight::from_parts(0, 4556))
 			.saturating_add(T::DbWeight::get().reads(11))
 			.saturating_add(T::DbWeight::get().writes(5))
@@ -227,13 +229,13 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `k` is `[1, 128]`.
 	fn kick(k: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1778 + k * (572 ±0)`
+		//  Measured:  `1811 + k * (572 ±0)`
 		//  Estimated: `4556 + k * (3033 ±0)`
-		// Minimum execution time: 35_037_000 picoseconds.
-		Weight::from_parts(35_081_878, 0)
+		// Minimum execution time: 43_875_000 picoseconds.
+		Weight::from_parts(47_332_240, 0)
 			.saturating_add(Weight::from_parts(0, 4556))
-			// Standard Error: 5_473
-			.saturating_add(Weight::from_parts(6_667_924, 0).saturating_mul(k.into()))
+			// Standard Error: 6_530
+			.saturating_add(Weight::from_parts(7_398_001, 0).saturating_mul(k.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into())))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into())))
@@ -264,13 +266,13 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `n` is `[1, 16]`.
 	fn nominate(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1797 + n * (102 ±0)`
+		//  Measured:  `1830 + n * (102 ±0)`
 		//  Estimated: `6248 + n * (2520 ±0)`
-		// Minimum execution time: 62_098_000 picoseconds.
-		Weight::from_parts(60_154_061, 0)
+		// Minimum execution time: 80_640_000 picoseconds.
+		Weight::from_parts(78_801_092, 0)
 			.saturating_add(Weight::from_parts(0, 6248))
-			// Standard Error: 19_257
-			.saturating_add(Weight::from_parts(3_839_855, 0).saturating_mul(n.into()))
+			// Standard Error: 22_249
+			.saturating_add(Weight::from_parts(4_996_344, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(12))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into())))
 			.saturating_add(T::DbWeight::get().writes(6))
@@ -294,10 +296,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn chill() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1747`
+		//  Measured:  `1780`
 		//  Estimated: `6248`
-		// Minimum execution time: 54_993_000 picoseconds.
-		Weight::from_parts(56_698_000, 0)
+		// Minimum execution time: 71_494_000 picoseconds.
+		Weight::from_parts(73_487_000, 0)
 			.saturating_add(Weight::from_parts(0, 6248))
 			.saturating_add(T::DbWeight::get().reads(9))
 			.saturating_add(T::DbWeight::get().writes(6))
@@ -310,10 +312,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn set_payee() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `865`
+		//  Measured:  `898`
 		//  Estimated: `4556`
-		// Minimum execution time: 18_100_000 picoseconds.
-		Weight::from_parts(18_547_000, 0)
+		// Minimum execution time: 24_310_000 picoseconds.
+		Weight::from_parts(24_676_000, 0)
 			.saturating_add(Weight::from_parts(0, 4556))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -326,10 +328,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn update_payee() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `932`
+		//  Measured:  `965`
 		//  Estimated: `4556`
-		// Minimum execution time: 23_428_000 picoseconds.
-		Weight::from_parts(24_080_000, 0)
+		// Minimum execution time: 31_348_000 picoseconds.
+		Weight::from_parts(32_384_000, 0)
 			.saturating_add(Weight::from_parts(0, 4556))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -340,10 +342,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	fn set_controller() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `865`
+		//  Measured:  `898`
 		//  Estimated: `8122`
-		// Minimum execution time: 21_159_000 picoseconds.
-		Weight::from_parts(21_706_000, 0)
+		// Minimum execution time: 27_537_000 picoseconds.
+		Weight::from_parts(28_714_000, 0)
 			.saturating_add(Weight::from_parts(0, 8122))
 			.saturating_add(T::DbWeight::get().reads(3))
 			.saturating_add(T::DbWeight::get().writes(3))
@@ -354,8 +356,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 1_910_000 picoseconds.
-		Weight::from_parts(2_003_000, 0)
+		// Minimum execution time: 2_362_000 picoseconds.
+		Weight::from_parts(2_518_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -365,8 +367,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_076_000 picoseconds.
-		Weight::from_parts(7_349_000, 0)
+		// Minimum execution time: 7_752_000 picoseconds.
+		Weight::from_parts(8_105_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -376,8 +378,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_067_000 picoseconds.
-		Weight::from_parts(7_389_000, 0)
+		// Minimum execution time: 7_868_000 picoseconds.
+		Weight::from_parts(8_175_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -387,8 +389,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_148_000 picoseconds.
-		Weight::from_parts(7_446_000, 0)
+		// Minimum execution time: 7_945_000 picoseconds.
+		Weight::from_parts(8_203_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -399,11 +401,11 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_025_000 picoseconds.
-		Weight::from_parts(2_229_953, 0)
+		// Minimum execution time: 2_458_000 picoseconds.
+		Weight::from_parts(2_815_664, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			// Standard Error: 67
-			.saturating_add(Weight::from_parts(11_785, 0).saturating_mul(v.into()))
+			.saturating_add(Weight::from_parts(12_287, 0).saturating_mul(v.into()))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Staking::Ledger` (r:1502 w:1502)
@@ -415,13 +417,13 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `i` is `[0, 751]`.
 	fn deprecate_controller_batch(i: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `680 + i * (227 ±0)`
+		//  Measured:  `713 + i * (227 ±0)`
 		//  Estimated: `990 + i * (7132 ±0)`
-		// Minimum execution time: 4_321_000 picoseconds.
-		Weight::from_parts(4_407_000, 0)
+		// Minimum execution time: 4_976_000 picoseconds.
+		Weight::from_parts(5_102_000, 0)
 			.saturating_add(Weight::from_parts(0, 990))
-			// Standard Error: 37_239
-			.saturating_add(Weight::from_parts(21_300_598, 0).saturating_mul(i.into()))
+			// Standard Error: 36_458
+			.saturating_add(Weight::from_parts(25_359_275, 0).saturating_mul(i.into()))
 			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(i.into())))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into())))
 			.saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into()))
@@ -432,10 +434,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `System::Account` (r:1 w:1)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
@@ -457,15 +459,15 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `s` is `[0, 100]`.
 	fn force_unstake(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2127 + s * (4 ±0)`
+		//  Measured:  `2153 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 78_908_000 picoseconds.
-		Weight::from_parts(84_886_373, 0)
+		// Minimum execution time: 116_776_000 picoseconds.
+		Weight::from_parts(125_460_389, 0)
 			.saturating_add(Weight::from_parts(0, 6248))
-			// Standard Error: 3_376
-			.saturating_add(Weight::from_parts(1_217_850, 0).saturating_mul(s.into()))
+			// Standard Error: 3_095
+			.saturating_add(Weight::from_parts(1_300_502, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(13))
-			.saturating_add(T::DbWeight::get().writes(12))
+			.saturating_add(T::DbWeight::get().writes(13))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -474,13 +476,13 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `s` is `[1, 1000]`.
 	fn cancel_deferred_slash(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66639`
-		//  Estimated: `70104`
-		// Minimum execution time: 136_389_000 picoseconds.
-		Weight::from_parts(1_207_241_524, 0)
-			.saturating_add(Weight::from_parts(0, 70104))
-			// Standard Error: 77_138
-			.saturating_add(Weight::from_parts(6_443_948, 0).saturating_mul(s.into()))
+		//  Measured:  `66672`
+		//  Estimated: `70137`
+		// Minimum execution time: 135_135_000 picoseconds.
+		Weight::from_parts(937_565_332, 0)
+			.saturating_add(Weight::from_parts(0, 70137))
+			// Standard Error: 57_675
+			.saturating_add(Weight::from_parts(4_828_080, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -498,12 +500,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::ErasValidatorReward` (r:1 w:0)
 	/// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:65 w:65)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:65 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:65 w:65)
-	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:65 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:65 w:65)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::ErasStakersPaged` (r:1 w:0)
 	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Staking::ErasRewardPoints` (r:1 w:0)
@@ -512,30 +512,32 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Payee` (r:65 w:0)
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:65 w:65)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// The range of component `n` is `[0, 64]`.
 	fn payout_stakers_alive_staked(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `8249 + n * (396 ±0)`
-		//  Estimated: `10779 + n * (3774 ±0)`
-		// Minimum execution time: 130_222_000 picoseconds.
-		Weight::from_parts(167_236_150, 0)
-			.saturating_add(Weight::from_parts(0, 10779))
-			// Standard Error: 34_051
-			.saturating_add(Weight::from_parts(39_899_917, 0).saturating_mul(n.into()))
+		//  Measured:  `8275 + n * (389 ±0)`
+		//  Estimated: `10805 + n * (3566 ±0)`
+		// Minimum execution time: 180_144_000 picoseconds.
+		Weight::from_parts(237_134_733, 0)
+			.saturating_add(Weight::from_parts(0, 10805))
+			// Standard Error: 52_498
+			.saturating_add(Weight::from_parts(73_633_326, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(14))
 			.saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into())))
 			.saturating_add(T::DbWeight::get().writes(4))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into())))
-			.saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into()))
+			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into()))
 	}
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:0)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
@@ -543,26 +545,26 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `l` is `[1, 32]`.
 	fn rebond(l: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1922 + l * (5 ±0)`
+		//  Measured:  `1845 + l * (5 ±0)`
 		//  Estimated: `8877`
-		// Minimum execution time: 79_136_000 picoseconds.
-		Weight::from_parts(82_129_497, 0)
+		// Minimum execution time: 89_307_000 picoseconds.
+		Weight::from_parts(92_902_634, 0)
 			.saturating_add(Weight::from_parts(0, 8877))
-			// Standard Error: 3_867
-			.saturating_add(Weight::from_parts(75_156, 0).saturating_mul(l.into()))
+			// Standard Error: 4_446
+			.saturating_add(Weight::from_parts(73_546, 0).saturating_mul(l.into()))
 			.saturating_add(T::DbWeight::get().reads(9))
-			.saturating_add(T::DbWeight::get().writes(7))
+			.saturating_add(T::DbWeight::get().writes(6))
 	}
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
 	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Nominators` (r:1 w:1)
@@ -582,15 +584,15 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `s` is `[1, 100]`.
 	fn reap_stash(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2127 + s * (4 ±0)`
+		//  Measured:  `2153 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 89_375_000 picoseconds.
-		Weight::from_parts(91_224_907, 0)
+		// Minimum execution time: 130_544_000 picoseconds.
+		Weight::from_parts(133_260_598, 0)
 			.saturating_add(Weight::from_parts(0, 6248))
-			// Standard Error: 3_424
-			.saturating_add(Weight::from_parts(1_219_542, 0).saturating_mul(s.into()))
+			// Standard Error: 3_545
+			.saturating_add(Weight::from_parts(1_313_348, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(12))
-			.saturating_add(T::DbWeight::get().writes(11))
+			.saturating_add(T::DbWeight::get().writes(12))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -633,14 +635,14 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	fn new_era(v: u32, n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0 + n * (716 ±0) + v * (3594 ±0)`
-		//  Estimated: `456136 + n * (3566 ±4) + v * (3566 ±0)`
-		// Minimum execution time: 520_905_000 picoseconds.
-		Weight::from_parts(523_771_000, 0)
+		//  Estimated: `456136 + n * (3566 ±4) + v * (3566 ±40)`
+		// Minimum execution time: 654_756_000 picoseconds.
+		Weight::from_parts(658_861_000, 0)
 			.saturating_add(Weight::from_parts(0, 456136))
-			// Standard Error: 2_142_714
-			.saturating_add(Weight::from_parts(68_631_588, 0).saturating_mul(v.into()))
-			// Standard Error: 213_509
-			.saturating_add(Weight::from_parts(19_343_025, 0).saturating_mul(n.into()))
+			// Standard Error: 2_078_102
+			.saturating_add(Weight::from_parts(67_775_668, 0).saturating_mul(v.into()))
+			// Standard Error: 207_071
+			.saturating_add(Weight::from_parts(22_624_711, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(184))
 			.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into())))
 			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into())))
@@ -669,15 +671,15 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `n` is `[500, 1000]`.
 	fn get_npos_voters(v: u32, n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `3108 + n * (907 ±0) + v * (391 ±0)`
+		//  Measured:  `3141 + n * (907 ±0) + v * (391 ±0)`
 		//  Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 36_848_619_000 picoseconds.
-		Weight::from_parts(37_362_442_000, 0)
+		// Minimum execution time: 42_790_195_000 picoseconds.
+		Weight::from_parts(42_954_437_000, 0)
 			.saturating_add(Weight::from_parts(0, 456136))
-			// Standard Error: 415_031
-			.saturating_add(Weight::from_parts(5_204_987, 0).saturating_mul(v.into()))
-			// Standard Error: 415_031
-			.saturating_add(Weight::from_parts(4_132_636, 0).saturating_mul(n.into()))
+			// Standard Error: 478_107
+			.saturating_add(Weight::from_parts(6_744_044, 0).saturating_mul(v.into()))
+			// Standard Error: 478_107
+			.saturating_add(Weight::from_parts(4_837_739, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(179))
 			.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into())))
 			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into())))
@@ -692,13 +694,13 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// The range of component `v` is `[500, 1000]`.
 	fn get_npos_targets(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `946 + v * (50 ±0)`
+		//  Measured:  `979 + v * (50 ±0)`
 		//  Estimated: `3510 + v * (2520 ±0)`
-		// Minimum execution time: 2_512_817_000 picoseconds.
-		Weight::from_parts(119_401_374, 0)
+		// Minimum execution time: 2_851_801_000 picoseconds.
+		Weight::from_parts(4_477_533, 0)
 			.saturating_add(Weight::from_parts(0, 3510))
-			// Standard Error: 8_463
-			.saturating_add(Weight::from_parts(4_860_364, 0).saturating_mul(v.into()))
+			// Standard Error: 8_644
+			.saturating_add(Weight::from_parts(5_811_682, 0).saturating_mul(v.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into())))
 			.saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into()))
@@ -721,8 +723,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_686_000 picoseconds.
-		Weight::from_parts(3_881_000, 0)
+		// Minimum execution time: 4_250_000 picoseconds.
+		Weight::from_parts(4_472_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(7))
 	}
@@ -744,8 +746,8 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_143_000 picoseconds.
-		Weight::from_parts(3_424_000, 0)
+		// Minimum execution time: 3_986_000 picoseconds.
+		Weight::from_parts(4_144_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(7))
 	}
@@ -773,10 +775,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn chill_other() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1870`
+		//  Measured:  `1903`
 		//  Estimated: `6248`
-		// Minimum execution time: 66_946_000 picoseconds.
-		Weight::from_parts(69_382_000, 0)
+		// Minimum execution time: 87_291_000 picoseconds.
+		Weight::from_parts(89_344_000, 0)
 			.saturating_add(Weight::from_parts(0, 6248))
 			.saturating_add(T::DbWeight::get().reads(12))
 			.saturating_add(T::DbWeight::get().writes(6))
@@ -787,10 +789,10 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	fn force_apply_min_commission() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `658`
+		//  Measured:  `691`
 		//  Estimated: `3510`
-		// Minimum execution time: 11_278_000 picoseconds.
-		Weight::from_parts(11_603_000, 0)
+		// Minimum execution time: 16_113_000 picoseconds.
+		Weight::from_parts(16_593_000, 0)
 			.saturating_add(Weight::from_parts(0, 3510))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -801,29 +803,53 @@ impl<T: frame_system::Config> pallet_staking::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 1_963_000 picoseconds.
-		Weight::from_parts(2_077_000, 0)
+		// Minimum execution time: 2_433_000 picoseconds.
+		Weight::from_parts(2_561_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:1 w:1)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:0)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:0)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	fn restore_ledger() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1040`
+		//  Estimated: `4764`
+		// Minimum execution time: 50_167_000 picoseconds.
+		Weight::from_parts(51_108_000, 0)
+			.saturating_add(Weight::from_parts(0, 4764))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:1 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:1 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`)
 	/// Storage: `Balances::Freezes` (r:1 w:0)
 	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
-	fn restore_ledger() -> Weight {
+	fn migrate_currency() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1014`
+		//  Measured:  `1209`
 		//  Estimated: `4764`
-		// Minimum execution time: 40_258_000 picoseconds.
-		Weight::from_parts(41_210_000, 0)
+		// Minimum execution time: 91_790_000 picoseconds.
+		Weight::from_parts(92_991_000, 0)
 			.saturating_add(Weight::from_parts(0, 4764))
-			.saturating_add(T::DbWeight::get().reads(5))
-			.saturating_add(T::DbWeight::get().writes(4))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
 	}
 }
diff --git a/prdoc/pr_5501.prdoc b/prdoc/pr_5501.prdoc
new file mode 100644
index 00000000000..f2a5aa9a466
--- /dev/null
+++ b/prdoc/pr_5501.prdoc
@@ -0,0 +1,47 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Currency to Fungible migration for pallet-staking
+
+doc:
+  - audience: Runtime User
+    description: |
+      Lazy migration of staking balance from `Currency::locks` to `Fungible::holds`. New extrinsic
+      `staking::migrate_currency` removes the old lock along with other housekeeping. Additionally, any ledger mutation
+      creates hold if it does not exist.
+
+      The pallet-staking configuration item `Currency` is updated to use `fungible::hold::Mutate` type while still
+      requiring `LockableCurrency` type to be passed as `OldCurrency` for migration purposes.
+
+
+crates:
+  - name: westend-runtime
+    bump: major
+  - name: kitchensink-runtime
+    bump: minor
+  - name: pallet-delegated-staking
+    bump: patch
+  - name: pallet-nomination-pools
+    bump: minor
+  - name: pallet-nomination-pools-runtime-api
+    bump: patch
+  - name: sp-staking
+    bump: patch
+  - name: pallet-beefy
+    bump: patch
+  - name: pallet-fast-unstake
+    bump: patch
+  - name: pallet-staking
+    bump: major
+  - name: pallet-grandpa
+    bump: patch
+  - name: pallet-babe
+    bump: patch
+  - name: pallet-nomination-pools-benchmarking
+    bump: patch
+  - name: pallet-session-benchmarking
+    bump: patch
+  - name: pallet-root-offences
+    bump: patch
+  - name: pallet-offences-benchmarking
+    bump: patch
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index de377a55bc8..117d306e306 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -53,7 +53,9 @@ use frame_support::{
 			Balanced, Credit, HoldConsideration, ItemOf, NativeFromLeft, NativeOrWithId, UnionOf,
 		},
 		tokens::{
-			imbalance::ResolveAssetTo, nonfungibles_v2::Inspect, pay::PayAssetFromAccount,
+			imbalance::{ResolveAssetTo, ResolveTo},
+			nonfungibles_v2::Inspect,
+			pay::PayAssetFromAccount,
 			GetSalary, PayFromAccount,
 		},
 		AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU16, ConstU32, ConstU64,
@@ -719,13 +721,15 @@ impl pallet_staking::BenchmarkingConfig for StakingBenchmarkingConfig {
 }
 
 impl pallet_staking::Config for Runtime {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = Balance;
 	type UnixTime = Timestamp;
 	type CurrencyToVote = sp_staking::currency_to_vote::U128CurrencyToVote;
-	type RewardRemainder = Treasury;
+	type RewardRemainder = ResolveTo<TreasuryAccount, Balances>;
 	type RuntimeEvent = RuntimeEvent;
-	type Slash = Treasury; // send the slashed funds to the treasury.
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type Slash = ResolveTo<TreasuryAccount, Balances>; // send the slashed funds to the treasury.
 	type Reward = (); // rewards are minted from the void
 	type SessionsPerEra = SessionsPerEra;
 	type BondingDuration = BondingDuration;
@@ -748,7 +752,7 @@ impl pallet_staking::Config for Runtime {
 	type MaxUnlockingChunks = ConstU32<32>;
 	type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch;
 	type HistoryDepth = HistoryDepth;
-	type EventListeners = NominationPools;
+	type EventListeners = (NominationPools, DelegatedStaking);
 	type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
 	type BenchmarkingConfig = StakingBenchmarkingConfig;
 	type DisablingStrategy = pallet_staking::UpToLimitWithReEnablingDisablingStrategy;
@@ -932,6 +936,21 @@ impl pallet_bags_list::Config<VoterBagsListInstance> for Runtime {
 	type WeightInfo = pallet_bags_list::weights::SubstrateWeight<Runtime>;
 }
 
+parameter_types! {
+	pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk");
+	pub const SlashRewardFraction: Perbill = Perbill::from_percent(1);
+}
+
+impl pallet_delegated_staking::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type PalletId = DelegatedStakingPalletId;
+	type Currency = Balances;
+	type OnSlash = ();
+	type SlashRewardFraction = SlashRewardFraction;
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type CoreStaking = Staking;
+}
+
 parameter_types! {
 	pub const PostUnbondPoolsWindow: u32 = 4;
 	pub const NominationPoolsPalletId: PalletId = PalletId(*b"py/nopls");
@@ -960,7 +979,8 @@ impl pallet_nomination_pools::Config for Runtime {
 	type RewardCounter = FixedU128;
 	type BalanceToU256 = BalanceToU256;
 	type U256ToBalance = U256ToBalance;
-	type StakeAdapter = pallet_nomination_pools::adapter::TransferStake<Self, Staking>;
+	type StakeAdapter =
+		pallet_nomination_pools::adapter::DelegateStake<Self, Staking, DelegatedStaking>;
 	type PostUnbondingPoolsWindow = PostUnbondPoolsWindow;
 	type MaxMetadataLen = ConstU32<256>;
 	type MaxUnbonding = ConstU32<8>;
@@ -2691,6 +2711,9 @@ mod runtime {
 	#[runtime::pallet_index(81)]
 	pub type VerifySignature = pallet_verify_signature::Pallet<Runtime>;
 
+	#[runtime::pallet_index(82)]
+	pub type DelegatedStaking = pallet_delegated_staking::Pallet<Runtime>;
+
 	#[runtime::pallet_index(83)]
 	pub type AssetRewards = pallet_asset_rewards::Pallet<Runtime>;
 
diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs
index 7f5364744c6..0394f6cd739 100644
--- a/substrate/bin/node/testing/src/genesis.rs
+++ b/substrate/bin/node/testing/src/genesis.rs
@@ -38,9 +38,9 @@ pub fn config_endowed(extra_endowed: Vec<AccountId>) -> RuntimeGenesisConfig {
 		(alice(), 111 * DOLLARS),
 		(bob(), 100 * DOLLARS),
 		(charlie(), 100_000_000 * DOLLARS),
-		(dave(), 111 * DOLLARS),
+		(dave(), 112 * DOLLARS),
 		(eve(), 101 * DOLLARS),
-		(ferdie(), 100 * DOLLARS),
+		(ferdie(), 101 * DOLLARS),
 	];
 
 	endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS)));
diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs
index 23857470adc..8d00509e800 100644
--- a/substrate/frame/babe/src/mock.rs
+++ b/substrate/frame/babe/src/mock.rs
@@ -157,6 +157,7 @@ impl onchain::Config for OnChainSeqPhragmen {
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Test {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type SessionsPerEra = SessionsPerEra;
 	type BondingDuration = BondingDuration;
diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs
index 7ae41c60918..38e0cc4cfc2 100644
--- a/substrate/frame/beefy/src/mock.rs
+++ b/substrate/frame/beefy/src/mock.rs
@@ -235,6 +235,7 @@ impl onchain::Config for OnChainSeqPhragmen {
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Test {
 	type RuntimeEvent = RuntimeEvent;
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
 	type SessionInterface = Self;
diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs
index 1d181eb29ca..0dacfe9c557 100644
--- a/substrate/frame/delegated-staking/src/lib.rs
+++ b/substrate/frame/delegated-staking/src/lib.rs
@@ -520,7 +520,7 @@ impl<T: Config> Pallet<T> {
 		let stake = T::CoreStaking::stake(who)?;
 
 		// release funds from core staking.
-		T::CoreStaking::migrate_to_virtual_staker(who);
+		T::CoreStaking::migrate_to_virtual_staker(who)?;
 
 		// transfer just released staked amount plus any free amount.
 		let amount_to_transfer =
diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs
index 811d5739f4e..875279864f7 100644
--- a/substrate/frame/delegated-staking/src/mock.rs
+++ b/substrate/frame/delegated-staking/src/mock.rs
@@ -102,6 +102,7 @@ impl onchain::Config for OnChainSeqPhragmen {
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Runtime {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type UnixTime = pallet_timestamp::Pallet<Self>;
 	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs
index b7b82a43771..c764e2741a2 100644
--- a/substrate/frame/delegated-staking/src/tests.rs
+++ b/substrate/frame/delegated-staking/src/tests.rs
@@ -671,12 +671,14 @@ mod staking_integration {
 			));
 			assert_ok!(Staking::nominate(RuntimeOrigin::signed(agent), vec![GENESIS_VALIDATOR],));
 			let init_stake = Staking::stake(&agent).unwrap();
+			// no extra provider added.
+			assert_eq!(System::providers(&agent), 1);
 
 			// scenario: 200 is a pool account, and the stake comes from its 4 delegators (300..304)
 			// in equal parts. lets try to migrate this nominator into delegate based stake.
 
 			// all balance currently is in 200
-			assert_eq!(pallet_staking::asset::stakeable_balance::<T>(&agent), agent_amount);
+			assert_eq!(pallet_staking::asset::total_balance::<T>(&agent), agent_amount);
 
 			// to migrate, nominator needs to set an account as a proxy delegator where staked funds
 			// will be moved and delegated back to this old nominator account. This should be funded
@@ -685,8 +687,9 @@ mod staking_integration {
 				DelegatedStaking::generate_proxy_delegator(Agent::from(agent)).get();
 
 			assert_ok!(DelegatedStaking::migrate_to_agent(RawOrigin::Signed(agent).into(), 201));
-			// after migration, funds are moved to proxy delegator, still a provider exists.
-			assert_eq!(System::providers(&agent), 1);
+			// after migration, no provider left since free balance is 0 and staking pallet released
+			// all funds.
+			assert_eq!(System::providers(&agent), 0);
 			assert_eq!(Balances::free_balance(agent), 0);
 			// proxy delegator has one provider as well with no free balance.
 			assert_eq!(System::providers(&proxy_delegator), 1);
@@ -798,8 +801,6 @@ mod staking_integration {
 				RawOrigin::Signed(agent).into(),
 				reward_acc
 			));
-			// becoming an agent adds another provider.
-			assert_eq!(System::providers(&agent), 2);
 
 			// delegate to this account
 			fund(&delegator, 1000);
diff --git a/substrate/frame/delegated-staking/src/types.rs b/substrate/frame/delegated-staking/src/types.rs
index a78aa3f5590..14f49466f0e 100644
--- a/substrate/frame/delegated-staking/src/types.rs
+++ b/substrate/frame/delegated-staking/src/types.rs
@@ -131,10 +131,6 @@ impl<T: Config> AgentLedger<T> {
 	///
 	/// Increments provider count if this is a new agent.
 	pub(crate) fn update(self, key: &T::AccountId) {
-		if !<Agents<T>>::contains_key(key) {
-			// This is a new agent. Provide for this account.
-			frame_system::Pallet::<T>::inc_providers(key);
-		}
 		<Agents<T>>::insert(key, self)
 	}
 
@@ -142,8 +138,6 @@ impl<T: Config> AgentLedger<T> {
 	pub(crate) fn remove(key: &T::AccountId) {
 		debug_assert!(<Agents<T>>::contains_key(key), "Agent should exist in storage");
 		<Agents<T>>::remove(key);
-		// Remove provider reference.
-		let _ = frame_system::Pallet::<T>::dec_providers(key).defensive();
 	}
 
 	/// Effective total balance of the `Agent`.
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
index 7a48ae868a5..f11f9c04dbf 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml
@@ -34,6 +34,7 @@ frame-system = { workspace = true, default-features = true }
 
 pallet-bags-list = { workspace = true, default-features = true }
 pallet-balances = { workspace = true, default-features = true }
+pallet-delegated-staking = { workspace = true, default-features = true }
 pallet-election-provider-multi-phase = { workspace = true, default-features = true }
 pallet-nomination-pools = { workspace = true, default-features = true }
 pallet-session = { workspace = true, default-features = true }
@@ -47,6 +48,7 @@ try-runtime = [
 	"frame-system/try-runtime",
 	"pallet-bags-list/try-runtime",
 	"pallet-balances/try-runtime",
+	"pallet-delegated-staking/try-runtime",
 	"pallet-election-provider-multi-phase/try-runtime",
 	"pallet-nomination-pools/try-runtime",
 	"pallet-session/try-runtime",
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
index 26a6345e145..b1029e89fe8 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs
@@ -327,8 +327,8 @@ fn automatic_unbonding_pools() {
 		assert_eq!(<Runtime as pallet_nomination_pools::Config>::MaxUnbonding::get(), 1);
 
 		// init state of pool members.
-		let init_stakeable_balance_2 = pallet_staking::asset::stakeable_balance::<Runtime>(&2);
-		let init_stakeable_balance_3 = pallet_staking::asset::stakeable_balance::<Runtime>(&3);
+		let init_free_balance_2 = Balances::free_balance(2);
+		let init_free_balance_3 = Balances::free_balance(3);
 
 		let pool_bonded_account = Pools::generate_bonded_account(1);
 
@@ -378,7 +378,7 @@ fn automatic_unbonding_pools() {
 		System::reset_events();
 
 		let staked_before_withdraw_pool = staked_amount_for(pool_bonded_account);
-		assert_eq!(pallet_staking::asset::stakeable_balance::<Runtime>(&pool_bonded_account), 26);
+		assert_eq!(delegated_balance_for(pool_bonded_account), 5 + 10 + 10);
 
 		// now unbonding 3 will work, although the pool's ledger still has the unlocking chunks
 		// filled up.
@@ -390,13 +390,13 @@ fn automatic_unbonding_pools() {
 			[
 				// auto-withdraw happened as expected to release 2's unbonding funds, but the funds
 				// were not transferred to 2 and stay in the pool's transferrable balance instead.
-				pallet_staking::Event::Withdrawn { stash: 7939698191839293293, amount: 10 },
-				pallet_staking::Event::Unbonded { stash: 7939698191839293293, amount: 10 }
+				pallet_staking::Event::Withdrawn { stash: pool_bonded_account, amount: 10 },
+				pallet_staking::Event::Unbonded { stash: pool_bonded_account, amount: 10 }
 			]
 		);
 
 		// balance of the pool remains the same, it hasn't withdraw explicitly from the pool yet.
-		assert_eq!(pallet_staking::asset::stakeable_balance::<Runtime>(&pool_bonded_account), 26);
+		assert_eq!(delegated_balance_for(pool_bonded_account), 25);
 		// but the locked amount in the pool's account decreases due to the auto-withdraw:
 		assert_eq!(staked_before_withdraw_pool - 10, staked_amount_for(pool_bonded_account));
 
@@ -405,12 +405,12 @@ fn automatic_unbonding_pools() {
 
 		// however, note that the withdrawing from the pool still works for 2, the funds are taken
 		// from the pool's non staked balance.
-		assert_eq!(pallet_staking::asset::stakeable_balance::<Runtime>(&pool_bonded_account), 26);
-		assert_eq!(pallet_staking::asset::staked::<Runtime>(&pool_bonded_account), 15);
+		assert_eq!(delegated_balance_for(pool_bonded_account), 25);
+		assert_eq!(staked_amount_for(pool_bonded_account), 15);
 		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(2), 2, 10));
-		assert_eq!(pallet_staking::asset::stakeable_balance::<Runtime>(&pool_bonded_account), 16);
+		assert_eq!(delegated_balance_for(pool_bonded_account), 15);
 
-		assert_eq!(pallet_staking::asset::stakeable_balance::<Runtime>(&2), 20);
+		assert_eq!(Balances::free_balance(2), 20);
 		assert_eq!(TotalValueLocked::<Runtime>::get(), 15);
 
 		// 3 cannot withdraw yet.
@@ -429,15 +429,9 @@ fn automatic_unbonding_pools() {
 		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(3), 3, 10));
 
 		// final conditions are the expected.
-		assert_eq!(pallet_staking::asset::stakeable_balance::<Runtime>(&pool_bonded_account), 6); // 5 init bonded + ED
-		assert_eq!(
-			pallet_staking::asset::stakeable_balance::<Runtime>(&2),
-			init_stakeable_balance_2
-		);
-		assert_eq!(
-			pallet_staking::asset::stakeable_balance::<Runtime>(&3),
-			init_stakeable_balance_3
-		);
+		assert_eq!(delegated_balance_for(pool_bonded_account), 5); // 5 init bonded
+		assert_eq!(Balances::free_balance(2), init_free_balance_2);
+		assert_eq!(Balances::free_balance(3), init_free_balance_3);
 
 		assert_eq!(TotalValueLocked::<Runtime>::get(), init_tvl);
 	});
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
index eaab848c169..bcb25f8287b 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
@@ -21,6 +21,7 @@ use frame_support::{
 	assert_ok, parameter_types, traits,
 	traits::{Hooks, UnfilteredDispatchable, VariantCountOf},
 	weights::constants,
+	PalletId,
 };
 use frame_system::EnsureRoot;
 use sp_core::{ConstU32, Get};
@@ -36,7 +37,7 @@ use sp_runtime::{
 };
 use sp_staking::{
 	offence::{OffenceDetails, OnOffenceHandler},
-	EraIndex, SessionIndex,
+	Agent, DelegationInterface, EraIndex, SessionIndex, StakingInterface,
 };
 use std::collections::BTreeMap;
 
@@ -68,6 +69,7 @@ frame_support::construct_runtime!(
 		System: frame_system,
 		ElectionProviderMultiPhase: pallet_election_provider_multi_phase,
 		Staking: pallet_staking,
+		DelegatedStaking: pallet_delegated_staking,
 		Pools: pallet_nomination_pools,
 		Balances: pallet_balances,
 		BagsList: pallet_bags_list,
@@ -77,7 +79,7 @@ frame_support::construct_runtime!(
 	}
 );
 
-pub(crate) type AccountId = u64;
+pub(crate) type AccountId = u128;
 pub(crate) type AccountIndex = u32;
 pub(crate) type BlockNumber = u32;
 pub(crate) type Balance = u64;
@@ -87,8 +89,10 @@ pub(crate) type Moment = u32;
 
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Runtime {
+	type AccountId = AccountId;
 	type Block = Block;
 	type AccountData = pallet_balances::AccountData<Balance>;
+	type Lookup = sp_runtime::traits::IdentityLookup<Self::AccountId>;
 }
 
 const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
@@ -265,7 +269,8 @@ impl pallet_nomination_pools::Config for Runtime {
 	type RewardCounter = sp_runtime::FixedU128;
 	type BalanceToU256 = BalanceToU256;
 	type U256ToBalance = U256ToBalance;
-	type StakeAdapter = pallet_nomination_pools::adapter::TransferStake<Self, Staking>;
+	type StakeAdapter =
+		pallet_nomination_pools::adapter::DelegateStake<Self, Staking, DelegatedStaking>;
 	type PostUnbondingPoolsWindow = ConstU32<2>;
 	type PalletId = PoolsPalletId;
 	type MaxMetadataLen = ConstU32<256>;
@@ -274,6 +279,21 @@ impl pallet_nomination_pools::Config for Runtime {
 	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
 }
 
+parameter_types! {
+	pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk");
+	pub const SlashRewardFraction: Perbill = Perbill::from_percent(1);
+}
+
+impl pallet_delegated_staking::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type PalletId = DelegatedStakingPalletId;
+	type Currency = Balances;
+	type OnSlash = ();
+	type SlashRewardFraction = SlashRewardFraction;
+	type RuntimeHoldReason = RuntimeHoldReason;
+	type CoreStaking = Staking;
+}
+
 parameter_types! {
 	pub static MaxUnlockingChunks: u32 = 32;
 }
@@ -285,6 +305,7 @@ pub(crate) const SLASHING_DISABLING_FACTOR: usize = 3;
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Runtime {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = Balance;
 	type UnixTime = Timestamp;
@@ -302,7 +323,7 @@ impl pallet_staking::Config for Runtime {
 	type NominationsQuota = pallet_staking::FixedNominationsQuota<MAX_QUOTA_NOMINATIONS>;
 	type TargetList = pallet_staking::UseValidatorsMap<Self>;
 	type MaxUnlockingChunks = MaxUnlockingChunks;
-	type EventListeners = Pools;
+	type EventListeners = (Pools, DelegatedStaking);
 	type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
 	type DisablingStrategy =
 		pallet_staking::UpToLimitWithReEnablingDisablingStrategy<SLASHING_DISABLING_FACTOR>;
@@ -502,7 +523,7 @@ impl Default for BalancesExtBuilder {
 			(100, 100),
 			(200, 100),
 			// stashes
-			(11, 1000),
+			(11, 1100),
 			(21, 2000),
 			(31, 3000),
 			(41, 4000),
@@ -581,7 +602,7 @@ impl ExtBuilder {
 			// set the keys for the first session.
 			keys: stakers
 				.into_iter()
-				.map(|(id, ..)| (id, id, SessionKeys { other: (id as u64).into() }))
+				.map(|(id, ..)| (id, id, SessionKeys { other: (id as AccountId as u64).into() }))
 				.collect(),
 			..Default::default()
 		}
@@ -926,7 +947,11 @@ pub(crate) fn set_minimum_election_score(
 }
 
 pub(crate) fn staked_amount_for(account_id: AccountId) -> Balance {
-	pallet_staking::asset::staked::<Runtime>(&account_id)
+	Staking::total_stake(&account_id).expect("account must be staker")
+}
+
+pub(crate) fn delegated_balance_for(account_id: AccountId) -> Balance {
+	DelegatedStaking::agent_balance(Agent::from(account_id)).unwrap_or_default()
 }
 
 pub(crate) fn staking_events() -> Vec<pallet_staking::Event<Runtime>> {
diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs
index f044fc61018..cf4f5f49240 100644
--- a/substrate/frame/fast-unstake/src/mock.rs
+++ b/substrate/frame/fast-unstake/src/mock.rs
@@ -105,6 +105,7 @@ impl frame_election_provider_support::ElectionProvider for MockElection {
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Runtime {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type UnixTime = pallet_timestamp::Pallet<Self>;
 	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
@@ -223,8 +224,9 @@ impl ExtBuilder {
 				.clone()
 				.into_iter()
 				.map(|(stash, _, balance)| (stash, balance * 2))
-				.chain(validators_range.clone().map(|x| (x, 7 + 100)))
-				.chain(nominators_range.clone().map(|x| (x, 7 + 100)))
+				// give stakers enough balance for stake, ED and fast unstake deposit.
+				.chain(validators_range.clone().map(|x| (x, 7 + 1 + 100)))
+				.chain(nominators_range.clone().map(|x| (x, 7 + 1 + 100)))
 				.collect::<Vec<_>>(),
 		}
 		.assimilate_storage(&mut storage);
diff --git a/substrate/frame/fast-unstake/src/tests.rs b/substrate/frame/fast-unstake/src/tests.rs
index 7c11f381ca1..0fddb88e02b 100644
--- a/substrate/frame/fast-unstake/src/tests.rs
+++ b/substrate/frame/fast-unstake/src/tests.rs
@@ -19,7 +19,15 @@
 
 use super::*;
 use crate::{mock::*, types::*, Event};
-use frame_support::{pallet_prelude::*, testing_prelude::*, traits::Currency};
+use frame_support::{
+	pallet_prelude::*,
+	testing_prelude::*,
+	traits::{
+		fungible::Inspect,
+		tokens::{Fortitude::Polite, Preservation::Expendable},
+		Currency,
+	},
+};
 use pallet_staking::{CurrentEra, RewardDestination};
 
 use sp_runtime::traits::BadOrigin;
@@ -146,7 +154,7 @@ fn deregister_works() {
 
 		// Controller then changes mind and deregisters.
 		assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(1)));
-		assert_eq!(<T as Config>::Currency::reserved_balance(&1) - pre_reserved, 0);
+		assert_eq!(<T as Config>::Currency::reserved_balance(&1), pre_reserved);
 
 		// Ensure stash no longer exists in the queue.
 		assert_eq!(Queue::<T>::get(1), None);
@@ -297,7 +305,7 @@ mod on_idle {
 			);
 			assert_eq!(Queue::<T>::count(), 3);
 
-			assert_eq!(<T as Config>::Currency::reserved_balance(&1) - pre_reserved, 0);
+			assert_eq!(<T as Config>::Currency::reserved_balance(&1), pre_reserved);
 
 			assert_eq!(
 				fast_unstake_events_since_last_call(),
@@ -793,6 +801,8 @@ mod on_idle {
 				RuntimeOrigin::signed(VALIDATOR_PREFIX),
 				vec![VALIDATOR_PREFIX]
 			));
+
+			assert_eq!(Balances::reducible_balance(&VALIDATOR_PREFIX, Expendable, Polite), 7);
 			assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(VALIDATOR_PREFIX)));
 
 			// but they indeed are exposed!
diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs
index 87369c23948..0a85d9ffd2b 100644
--- a/substrate/frame/grandpa/src/mock.rs
+++ b/substrate/frame/grandpa/src/mock.rs
@@ -161,6 +161,7 @@ impl onchain::Config for OnChainSeqPhragmen {
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Test {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = <Self as pallet_balances::Config>::Balance;
 	type SessionsPerEra = SessionsPerEra;
diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs
index 7ddb78cca3f..20c5eafbcfc 100644
--- a/substrate/frame/nomination-pools/benchmarking/src/inner.rs
+++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs
@@ -132,6 +132,10 @@ fn migrate_to_transfer_stake<T: Config>(pool_id: PoolId) {
 			.expect("member should have enough balance to transfer");
 		});
 
+	// Pool needs to have ED balance free to stake so give it some.
+	// Note: ED was not required before pallet-staking migrated from locks to holds.
+	let _ = CurrencyOf::<T>::mint_into(&pool_acc, CurrencyOf::<T>::minimum_balance());
+
 	pallet_staking::Pallet::<T>::migrate_to_direct_staker(&pool_acc);
 }
 
@@ -141,14 +145,6 @@ fn vote_to_balance<T: pallet_nomination_pools::Config>(
 	vote.try_into().map_err(|_| "could not convert u64 to Balance")
 }
 
-/// `assertion` should strictly be true if the adapter is using `Delegate` strategy and strictly
-/// false if the adapter is not using `Delegate` strategy.
-fn assert_if_delegate<T: pallet_nomination_pools::Config>(assertion: bool) {
-	let legacy_adapter_used = T::StakeAdapter::strategy_type() != StakeStrategyType::Delegate;
-	// one and only one of the two should be true.
-	assert!(assertion ^ legacy_adapter_used);
-}
-
 #[allow(unused)]
 struct ListScenario<T: pallet_nomination_pools::Config> {
 	/// Stash/Controller that is expected to be moved.
@@ -981,9 +977,6 @@ mod benchmarks {
 
 	#[benchmark]
 	fn apply_slash() {
-		// Note: With older `TransferStake` strategy, slashing is greedy and apply_slash should
-		// always fail.
-
 		// We want to fill member's unbonding pools. So let's bond with big enough amount.
 		let deposit_amount =
 			Pools::<T>::depositor_min_bond() * T::MaxUnbonding::get().into() * 4u32.into();
@@ -993,7 +986,7 @@ mod benchmarks {
 		// verify user balance in the pool.
 		assert_eq!(PoolMembers::<T>::get(&depositor).unwrap().total_balance(), deposit_amount);
 		// verify delegated balance.
-		assert_if_delegate::<T>(
+		assert!(
 			T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) ==
 				Some(deposit_amount),
 		);
@@ -1017,7 +1010,7 @@ mod benchmarks {
 			deposit_amount / 2u32.into()
 		);
 		// verify delegated balance are not yet slashed.
-		assert_if_delegate::<T>(
+		assert!(
 			T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) ==
 				Some(deposit_amount),
 		);
@@ -1041,13 +1034,11 @@ mod benchmarks {
 
 		#[block]
 		{
-			assert_if_delegate::<T>(
-				Pools::<T>::apply_slash(
-					RuntimeOrigin::Signed(slash_reporter.clone()).into(),
-					depositor_lookup.clone(),
-				)
-				.is_ok(),
-			);
+			assert!(Pools::<T>::apply_slash(
+				RuntimeOrigin::Signed(slash_reporter.clone()).into(),
+				depositor_lookup.clone(),
+			)
+			.is_ok(),);
 		}
 
 		// verify balances are correct and slash applied.
@@ -1055,7 +1046,7 @@ mod benchmarks {
 			PoolMembers::<T>::get(&depositor).unwrap().total_balance(),
 			deposit_amount / 2u32.into()
 		);
-		assert_if_delegate::<T>(
+		assert!(
 			T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) ==
 				Some(deposit_amount / 2u32.into()),
 		);
@@ -1126,18 +1117,16 @@ mod benchmarks {
 		let _ = migrate_to_transfer_stake::<T>(1);
 		#[block]
 		{
-			assert_if_delegate::<T>(
-				Pools::<T>::migrate_pool_to_delegate_stake(
-					RuntimeOrigin::Signed(depositor.clone()).into(),
-					1u32.into(),
-				)
-				.is_ok(),
-			);
+			assert!(Pools::<T>::migrate_pool_to_delegate_stake(
+				RuntimeOrigin::Signed(depositor.clone()).into(),
+				1u32.into(),
+			)
+			.is_ok(),);
 		}
-		// this queries agent balance if `DelegateStake` strategy.
+		// this queries agent balance.
 		assert_eq!(
 			T::StakeAdapter::total_balance(Pool::from(pool_account.clone())),
-			Some(deposit_amount)
+			Some(deposit_amount + CurrencyOf::<T>::minimum_balance())
 		);
 	}
 
@@ -1152,13 +1141,11 @@ mod benchmarks {
 		let _ = migrate_to_transfer_stake::<T>(1);
 
 		// Now migrate pool to delegate stake keeping delegators unmigrated.
-		assert_if_delegate::<T>(
-			Pools::<T>::migrate_pool_to_delegate_stake(
-				RuntimeOrigin::Signed(depositor.clone()).into(),
-				1u32.into(),
-			)
-			.is_ok(),
-		);
+		assert!(Pools::<T>::migrate_pool_to_delegate_stake(
+			RuntimeOrigin::Signed(depositor.clone()).into(),
+			1u32.into(),
+		)
+		.is_ok(),);
 
 		// delegation does not exist.
 		assert!(
@@ -1171,16 +1158,14 @@ mod benchmarks {
 
 		#[block]
 		{
-			assert_if_delegate::<T>(
-				Pools::<T>::migrate_delegation(
-					RuntimeOrigin::Signed(depositor.clone()).into(),
-					depositor_lookup.clone(),
-				)
-				.is_ok(),
-			);
+			assert!(Pools::<T>::migrate_delegation(
+				RuntimeOrigin::Signed(depositor.clone()).into(),
+				depositor_lookup.clone(),
+			)
+			.is_ok(),);
 		}
 		// verify balances once more.
-		assert_if_delegate::<T>(
+		assert!(
 			T::StakeAdapter::member_delegation_balance(Member::from(depositor.clone())) ==
 				Some(deposit_amount),
 		);
diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs
index 15d9e2c5603..7c09cf22ad5 100644
--- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs
+++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs
@@ -78,6 +78,7 @@ parameter_types! {
 }
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Runtime {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = Balance;
 	type UnixTime = pallet_timestamp::Pallet<Self>;
diff --git a/substrate/frame/nomination-pools/src/adapter.rs b/substrate/frame/nomination-pools/src/adapter.rs
index f125919dabf..f1c68af4ea6 100644
--- a/substrate/frame/nomination-pools/src/adapter.rs
+++ b/substrate/frame/nomination-pools/src/adapter.rs
@@ -16,6 +16,7 @@
 // limitations under the License.
 
 use crate::*;
+use frame_support::traits::tokens::{Fortitude::Polite, Preservation::Expendable};
 use sp_staking::{Agent, DelegationInterface, DelegationMigrator, Delegator};
 
 /// Types of stake strategies.
@@ -245,8 +246,10 @@ pub trait StakeStrategy {
 /// strategy in an existing runtime, storage migration is required. See
 /// [`migration::unversioned::DelegationStakeMigration`]. For new runtimes, it is highly recommended
 /// to use the [`DelegateStake`] strategy.
+#[deprecated = "consider migrating to DelegateStake"]
 pub struct TransferStake<T: Config, Staking: StakingInterface>(PhantomData<(T, Staking)>);
 
+#[allow(deprecated)]
 impl<T: Config, Staking: StakingInterface<Balance = BalanceOf<T>, AccountId = T::AccountId>>
 	StakeStrategy for TransferStake<T, Staking>
 {
@@ -262,7 +265,8 @@ impl<T: Config, Staking: StakingInterface<Balance = BalanceOf<T>, AccountId = T:
 		pool_account: Pool<Self::AccountId>,
 		_: Member<Self::AccountId>,
 	) -> BalanceOf<T> {
-		T::Currency::balance(&pool_account.0).saturating_sub(Self::active_stake(pool_account))
+		// free/liquid balance of the pool account.
+		T::Currency::reducible_balance(&pool_account.get(), Expendable, Polite)
 	}
 
 	fn total_balance(pool_account: Pool<Self::AccountId>) -> Option<BalanceOf<T>> {
diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs
index f544e79ec48..f4552389a26 100644
--- a/substrate/frame/nomination-pools/src/mock.rs
+++ b/substrate/frame/nomination-pools/src/mock.rs
@@ -23,8 +23,10 @@ use frame_support::{
 	PalletId,
 };
 use frame_system::{EnsureSignedBy, RawOrigin};
-use sp_runtime::{BuildStorage, FixedU128};
-use sp_staking::{OnStakingUpdate, Stake};
+use sp_runtime::{BuildStorage, DispatchResult, FixedU128};
+use sp_staking::{
+	Agent, DelegationInterface, DelegationMigrator, Delegator, OnStakingUpdate, Stake,
+};
 
 pub type BlockNumber = u64;
 pub type AccountId = u128;
@@ -76,6 +78,7 @@ impl StakingMock {
 		let bonded = BondedBalanceMap::get();
 		let pre_total = bonded.get(&acc).unwrap();
 		Self::set_bonded_balance(acc, pre_total - amount);
+		DelegateMock::on_slash(acc, amount);
 		Pools::on_slash(&acc, pre_total - amount, &Default::default(), amount);
 	}
 }
@@ -112,8 +115,8 @@ impl sp_staking::StakingInterface for StakingMock {
 			.ok_or(DispatchError::Other("NotStash"))
 	}
 
-	fn is_virtual_staker(_who: &Self::AccountId) -> bool {
-		false
+	fn is_virtual_staker(who: &Self::AccountId) -> bool {
+		AgentBalanceMap::get().contains_key(who)
 	}
 
 	fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult {
@@ -162,7 +165,9 @@ impl sp_staking::StakingInterface for StakingMock {
 		staker_map.retain(|(unlocking_at, _amount)| *unlocking_at > current_era);
 
 		// if there was a withdrawal, notify the pallet.
-		Pools::on_withdraw(&who, unlocking_before.saturating_sub(unlocking(&staker_map)));
+		let withdraw_amount = unlocking_before.saturating_sub(unlocking(&staker_map));
+		Pools::on_withdraw(&who, withdraw_amount);
+		DelegateMock::on_withdraw(who, withdraw_amount);
 
 		UnbondingBalanceMap::set(&unbonding_map);
 		Ok(UnbondingBalanceMap::get().get(&who).unwrap().is_empty() &&
@@ -239,6 +244,176 @@ impl sp_staking::StakingInterface for StakingMock {
 	}
 }
 
+parameter_types! {
+	// Map of agent to their (delegated balance, unclaimed withdrawal, pending slash).
+	pub storage AgentBalanceMap: BTreeMap<AccountId, (Balance, Balance, Balance)> = Default::default();
+	pub storage DelegatorBalanceMap: BTreeMap<AccountId, Balance> = Default::default();
+}
+pub struct DelegateMock;
+impl DelegationInterface for DelegateMock {
+	type Balance = Balance;
+	type AccountId = AccountId;
+	fn agent_balance(agent: Agent<Self::AccountId>) -> Option<Self::Balance> {
+		AgentBalanceMap::get()
+			.get(&agent.get())
+			.copied()
+			.map(|(delegated, _, pending)| delegated - pending)
+	}
+
+	fn agent_transferable_balance(agent: Agent<Self::AccountId>) -> Option<Self::Balance> {
+		AgentBalanceMap::get()
+			.get(&agent.get())
+			.copied()
+			.map(|(_, unclaimed_withdrawals, _)| unclaimed_withdrawals)
+	}
+
+	fn delegator_balance(delegator: Delegator<Self::AccountId>) -> Option<Self::Balance> {
+		DelegatorBalanceMap::get().get(&delegator.get()).copied()
+	}
+
+	fn register_agent(
+		agent: Agent<Self::AccountId>,
+		_reward_account: &Self::AccountId,
+	) -> DispatchResult {
+		let mut agents = AgentBalanceMap::get();
+		agents.insert(agent.get(), (0, 0, 0));
+		AgentBalanceMap::set(&agents);
+		Ok(())
+	}
+
+	fn remove_agent(agent: Agent<Self::AccountId>) -> DispatchResult {
+		let mut agents = AgentBalanceMap::get();
+		let agent = agent.get();
+		assert!(agents.contains_key(&agent));
+		agents.remove(&agent);
+		AgentBalanceMap::set(&agents);
+		Ok(())
+	}
+
+	fn delegate(
+		delegator: Delegator<Self::AccountId>,
+		agent: Agent<Self::AccountId>,
+		amount: Self::Balance,
+	) -> DispatchResult {
+		let delegator = delegator.get();
+		let mut delegators = DelegatorBalanceMap::get();
+		delegators.entry(delegator).and_modify(|b| *b += amount).or_insert(amount);
+		DelegatorBalanceMap::set(&delegators);
+
+		let agent = agent.get();
+		let mut agents = AgentBalanceMap::get();
+		agents
+			.get_mut(&agent)
+			.map(|(d, _, _)| *d += amount)
+			.ok_or(DispatchError::Other("agent not registered"))?;
+		AgentBalanceMap::set(&agents);
+
+		if BondedBalanceMap::get().contains_key(&agent) {
+			StakingMock::bond_extra(&agent, amount)
+		} else {
+			// reward account does not matter in this context.
+			StakingMock::bond(&agent, amount, &999)
+		}
+	}
+
+	fn withdraw_delegation(
+		delegator: Delegator<Self::AccountId>,
+		agent: Agent<Self::AccountId>,
+		amount: Self::Balance,
+		_num_slashing_spans: u32,
+	) -> DispatchResult {
+		let mut delegators = DelegatorBalanceMap::get();
+		delegators.get_mut(&delegator.get()).map(|b| *b -= amount);
+		DelegatorBalanceMap::set(&delegators);
+
+		let mut agents = AgentBalanceMap::get();
+		agents.get_mut(&agent.get()).map(|(d, u, _)| {
+			*d -= amount;
+			*u -= amount;
+		});
+		AgentBalanceMap::set(&agents);
+
+		Ok(())
+	}
+
+	fn pending_slash(agent: Agent<Self::AccountId>) -> Option<Self::Balance> {
+		AgentBalanceMap::get()
+			.get(&agent.get())
+			.copied()
+			.map(|(_, _, pending_slash)| pending_slash)
+	}
+
+	fn delegator_slash(
+		agent: Agent<Self::AccountId>,
+		delegator: Delegator<Self::AccountId>,
+		value: Self::Balance,
+		_maybe_reporter: Option<Self::AccountId>,
+	) -> DispatchResult {
+		let mut delegators = DelegatorBalanceMap::get();
+		delegators.get_mut(&delegator.get()).map(|b| *b -= value);
+		DelegatorBalanceMap::set(&delegators);
+
+		let mut agents = AgentBalanceMap::get();
+		agents.get_mut(&agent.get()).map(|(_, _, p)| {
+			p.saturating_reduce(value);
+		});
+		AgentBalanceMap::set(&agents);
+
+		Ok(())
+	}
+}
+
+impl DelegateMock {
+	pub fn set_agent_balance(who: AccountId, delegated: Balance) {
+		let mut agents = AgentBalanceMap::get();
+		agents.insert(who, (delegated, 0, 0));
+		AgentBalanceMap::set(&agents);
+	}
+
+	pub fn set_delegator_balance(who: AccountId, amount: Balance) {
+		let mut delegators = DelegatorBalanceMap::get();
+		delegators.insert(who, amount);
+		DelegatorBalanceMap::set(&delegators);
+	}
+
+	pub fn on_slash(agent: AccountId, amount: Balance) {
+		let mut agents = AgentBalanceMap::get();
+		agents.get_mut(&agent).map(|(_, _, p)| *p += amount);
+		AgentBalanceMap::set(&agents);
+	}
+
+	fn on_withdraw(agent: AccountId, amount: Balance) {
+		let mut agents = AgentBalanceMap::get();
+		// if agent exists, add the amount to unclaimed withdrawals.
+		agents.get_mut(&agent).map(|(_, u, _)| *u += amount);
+		AgentBalanceMap::set(&agents);
+	}
+}
+
+impl DelegationMigrator for DelegateMock {
+	type Balance = Balance;
+	type AccountId = AccountId;
+	fn migrate_nominator_to_agent(
+		_agent: Agent<Self::AccountId>,
+		_reward_account: &Self::AccountId,
+	) -> DispatchResult {
+		unimplemented!("not used in current unit tests")
+	}
+
+	fn migrate_delegation(
+		_agent: Agent<Self::AccountId>,
+		_delegator: Delegator<Self::AccountId>,
+		_value: Self::Balance,
+	) -> DispatchResult {
+		unimplemented!("not used in current unit tests")
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn force_kill_agent(_agent: Agent<Self::AccountId>) {
+		unimplemented!("not used in current unit tests")
+	}
+}
+
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Runtime {
 	type Nonce = u64;
@@ -295,7 +470,7 @@ impl pools::Config for Runtime {
 	type RewardCounter = RewardCounter;
 	type BalanceToU256 = BalanceToU256;
 	type U256ToBalance = U256ToBalance;
-	type StakeAdapter = adapter::TransferStake<Self, StakingMock>;
+	type StakeAdapter = adapter::DelegateStake<Self, StakingMock, DelegateMock>;
 	type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow;
 	type PalletId = PoolsPalletId;
 	type MaxMetadataLen = MaxMetadataLen;
@@ -522,6 +697,21 @@ pub fn reward_imbalance(pool: PoolId) -> RewardImbalance {
 	}
 }
 
+pub fn set_pool_balance(who: AccountId, amount: Balance) {
+	StakingMock::set_bonded_balance(who, amount);
+	DelegateMock::set_agent_balance(who, amount);
+}
+
+pub fn member_delegation(who: AccountId) -> Balance {
+	<T as Config>::StakeAdapter::member_delegation_balance(Member::from(who))
+		.expect("who must be a pool member")
+}
+
+pub fn pool_balance(id: PoolId) -> Balance {
+	<T as Config>::StakeAdapter::total_balance(Pool::from(Pools::generate_bonded_account(id)))
+		.expect("who must be a bonded pool account")
+}
+
 #[cfg(test)]
 mod test {
 	use super::*;
diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs
index 06261699a5b..c46638d2f8f 100644
--- a/substrate/frame/nomination-pools/src/tests.rs
+++ b/substrate/frame/nomination-pools/src/tests.rs
@@ -24,6 +24,7 @@ use sp_runtime::{
 	traits::{BadOrigin, Dispatchable},
 	FixedU128,
 };
+use sp_staking::{Agent, DelegationInterface};
 
 macro_rules! unbonding_pools_with_era {
 	($($k:expr => $v:expr),* $(,)?) => {{
@@ -127,41 +128,41 @@ mod bonded_pool {
 			};
 
 			// 1 points : 1 balance ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 100);
+			set_pool_balance(bonded_pool.bonded_account(), 100);
 			assert_eq!(bonded_pool.balance_to_point(10), 10);
 			assert_eq!(bonded_pool.balance_to_point(0), 0);
 
 			// 2 points : 1 balance ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 50);
+			set_pool_balance(bonded_pool.bonded_account(), 50);
 			assert_eq!(bonded_pool.balance_to_point(10), 20);
 
 			// 1 points : 2 balance ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 100);
+			set_pool_balance(bonded_pool.bonded_account(), 100);
 			bonded_pool.points = 50;
 			assert_eq!(bonded_pool.balance_to_point(10), 5);
 
 			// 100 points : 0 balance ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 0);
+			set_pool_balance(bonded_pool.bonded_account(), 0);
 			bonded_pool.points = 100;
 			assert_eq!(bonded_pool.balance_to_point(10), 100 * 10);
 
 			// 0 points : 100 balance
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 100);
+			set_pool_balance(bonded_pool.bonded_account(), 100);
 			bonded_pool.points = 0;
 			assert_eq!(bonded_pool.balance_to_point(10), 10);
 
 			// 10 points : 3 balance ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 30);
+			set_pool_balance(bonded_pool.bonded_account(), 30);
 			bonded_pool.points = 100;
 			assert_eq!(bonded_pool.balance_to_point(10), 33);
 
 			// 2 points : 3 balance ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 300);
+			set_pool_balance(bonded_pool.bonded_account(), 300);
 			bonded_pool.points = 200;
 			assert_eq!(bonded_pool.balance_to_point(10), 6);
 
 			// 4 points : 9 balance ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 900);
+			set_pool_balance(bonded_pool.bonded_account(), 900);
 			bonded_pool.points = 400;
 			assert_eq!(bonded_pool.balance_to_point(90), 40);
 		})
@@ -182,7 +183,7 @@ mod bonded_pool {
 				},
 			};
 
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 100);
+			set_pool_balance(bonded_pool.bonded_account(), 100);
 			assert_eq!(bonded_pool.points_to_balance(10), 10);
 			assert_eq!(bonded_pool.points_to_balance(0), 0);
 
@@ -191,27 +192,27 @@ mod bonded_pool {
 			assert_eq!(bonded_pool.points_to_balance(10), 20);
 
 			// 100 balance : 0 points ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 100);
+			set_pool_balance(bonded_pool.bonded_account(), 100);
 			bonded_pool.points = 0;
 			assert_eq!(bonded_pool.points_to_balance(10), 0);
 
 			// 0 balance : 100 points ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 0);
+			set_pool_balance(bonded_pool.bonded_account(), 0);
 			bonded_pool.points = 100;
 			assert_eq!(bonded_pool.points_to_balance(10), 0);
 
 			// 10 balance : 3 points ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 100);
+			set_pool_balance(bonded_pool.bonded_account(), 100);
 			bonded_pool.points = 30;
 			assert_eq!(bonded_pool.points_to_balance(10), 33);
 
 			// 2 balance : 3 points ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 200);
+			set_pool_balance(bonded_pool.bonded_account(), 200);
 			bonded_pool.points = 300;
 			assert_eq!(bonded_pool.points_to_balance(10), 6);
 
 			// 4 balance : 9 points ratio
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), 400);
+			set_pool_balance(bonded_pool.bonded_account(), 400);
 			bonded_pool.points = 900;
 			assert_eq!(bonded_pool.points_to_balance(90), 40);
 		})
@@ -269,30 +270,21 @@ mod bonded_pool {
 				<<Runtime as Config>::MaxPointsToBalance as Get<u8>>::get().into();
 
 			// Simulate a 100% slashed pool
-			StakingMock::set_bonded_balance(pool.bonded_account(), 0);
+			set_pool_balance(pool.bonded_account(), 0);
 			assert_noop!(pool.ok_to_join(), Error::<Runtime>::OverflowRisk);
 
 			// Simulate a slashed pool at `MaxPointsToBalance` + 1 slashed pool
-			StakingMock::set_bonded_balance(
-				pool.bonded_account(),
-				max_points_to_balance.saturating_add(1),
-			);
+			set_pool_balance(pool.bonded_account(), max_points_to_balance.saturating_add(1));
 			assert_ok!(pool.ok_to_join());
 
 			// Simulate a slashed pool at `MaxPointsToBalance`
-			StakingMock::set_bonded_balance(pool.bonded_account(), max_points_to_balance);
+			set_pool_balance(pool.bonded_account(), max_points_to_balance);
 			assert_noop!(pool.ok_to_join(), Error::<Runtime>::OverflowRisk);
 
-			StakingMock::set_bonded_balance(
-				pool.bonded_account(),
-				Balance::MAX / max_points_to_balance,
-			);
+			set_pool_balance(pool.bonded_account(), Balance::MAX / max_points_to_balance);
 
 			// and a sanity check
-			StakingMock::set_bonded_balance(
-				pool.bonded_account(),
-				Balance::MAX / max_points_to_balance - 1,
-			);
+			set_pool_balance(pool.bonded_account(), Balance::MAX / max_points_to_balance - 1);
 			assert_ok!(pool.ok_to_join());
 		});
 	}
@@ -310,7 +302,7 @@ mod bonded_pool {
 					state: PoolState::Open,
 				},
 			};
-			StakingMock::set_bonded_balance(bonded_pool.bonded_account(), u128::MAX);
+			set_pool_balance(bonded_pool.bonded_account(), u128::MAX);
 
 			// Max out the points and balance of the pool and make sure the conversion works as
 			// expected and does not overflow.
@@ -640,8 +632,6 @@ mod sub_pools {
 }
 
 mod join {
-	use sp_runtime::TokenError;
-
 	use super::*;
 
 	#[test]
@@ -728,7 +718,7 @@ mod join {
 			);
 
 			// Force the pools bonded balance to 0, simulating a 100% slash
-			StakingMock::set_bonded_balance(Pools::generate_bonded_account(1), 0);
+			set_pool_balance(Pools::generate_bonded_account(1), 0);
 			assert_noop!(
 				Pools::join(RuntimeOrigin::signed(11), 420, 1),
 				Error::<Runtime>::OverflowRisk
@@ -754,29 +744,13 @@ mod join {
 			let max_points_to_balance: u128 =
 				<<Runtime as Config>::MaxPointsToBalance as Get<u8>>::get().into();
 
-			StakingMock::set_bonded_balance(
-				Pools::generate_bonded_account(123),
-				max_points_to_balance,
-			);
+			set_pool_balance(Pools::generate_bonded_account(123), max_points_to_balance);
 			assert_noop!(
 				Pools::join(RuntimeOrigin::signed(11), 420, 123),
 				Error::<Runtime>::OverflowRisk
 			);
 
-			StakingMock::set_bonded_balance(
-				Pools::generate_bonded_account(123),
-				Balance::MAX / max_points_to_balance,
-			);
-			// Balance needs to be gt Balance::MAX / `MaxPointsToBalance`
-			assert_noop!(
-				Pools::join(RuntimeOrigin::signed(11), 5, 123),
-				TokenError::FundsUnavailable,
-			);
-
-			StakingMock::set_bonded_balance(
-				Pools::generate_bonded_account(1),
-				max_points_to_balance,
-			);
+			set_pool_balance(Pools::generate_bonded_account(1), max_points_to_balance);
 
 			// Cannot join a pool that isn't open
 			unsafe_set_state(123, PoolState::Blocked);
@@ -807,7 +781,7 @@ mod join {
 	#[cfg_attr(not(debug_assertions), should_panic)]
 	fn join_panics_when_reward_pool_not_found() {
 		ExtBuilder::default().build_and_execute(|| {
-			StakingMock::set_bonded_balance(Pools::generate_bonded_account(123), 100);
+			set_pool_balance(Pools::generate_bonded_account(123), 100);
 			BondedPool::<Runtime> {
 				id: 123,
 				inner: BondedPoolInner {
@@ -2321,8 +2295,8 @@ mod claim_payout {
 	fn rewards_are_rounded_down_depositor_collects_them() {
 		ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| {
 			// initial balance of 10.
-
-			assert_eq!(Currency::free_balance(&10), 35);
+			let init_balance_10 = Currency::free_balance(&10);
+			assert_eq!(member_delegation(10), 10);
 			assert_eq!(
 				Currency::free_balance(&default_reward_account()),
 				Currency::minimum_balance()
@@ -2373,8 +2347,10 @@ mod claim_payout {
 			);
 
 			assert!(!Metadata::<T>::contains_key(1));
-			// original ed + ed put into reward account + reward + bond + dust.
-			assert_eq!(Currency::free_balance(&10), 35 + 5 + 13 + 10 + 1);
+			// original ed + ed put into reward account + reward + dust.
+			assert_eq!(Currency::free_balance(&10), init_balance_10 + 5 + 13 + 1);
+			// delegation reduced from 10 to 0.
+			assert_eq!(member_delegation(10), 0);
 		})
 	}
 
@@ -2444,9 +2420,10 @@ mod claim_payout {
 			let claimable_reward = 8 - ExistentialDeposit::get();
 			// NOTE: easier to read if we use 3, so let's use the number instead of variable.
 			assert_eq!(claimable_reward, 3, "test is correct if rewards are divisible by 3");
+			let init_balance = Currency::free_balance(&10);
 
 			// given
-			assert_eq!(Currency::free_balance(&10), 35);
+			assert_eq!(member_delegation(10), 10);
 
 			// when
 
@@ -2455,7 +2432,10 @@ mod claim_payout {
 			assert_ok!(Pools::claim_payout_other(RuntimeOrigin::signed(80), 10));
 
 			// then
-			assert_eq!(Currency::free_balance(&10), 36);
+			// delegated balance does not change.
+			assert_eq!(member_delegation(10), 10);
+			// reward of 1 is paid out to 10.
+			assert_eq!(Currency::free_balance(&10), init_balance + 1);
 			assert_eq!(Currency::free_balance(&default_reward_account()), 7);
 		})
 	}
@@ -2818,6 +2798,8 @@ mod unbond {
 		ExtBuilder::default()
 			.add_members(vec![(40, 40), (550, 550)])
 			.build_and_execute(|| {
+				let init_balance_40 = Currency::free_balance(&40);
+				let init_balance_550 = Currency::free_balance(&550);
 				let ed = Currency::minimum_balance();
 				// Given a slash from 600 -> 500
 				StakingMock::slash_by(1, 500);
@@ -2864,7 +2846,9 @@ mod unbond {
 					PoolMembers::<Runtime>::get(40).unwrap().unbonding_eras,
 					member_unbonding_eras!(3 => 6)
 				);
-				assert_eq!(Currency::free_balance(&40), 40 + 40); // We claim rewards when unbonding
+				assert_eq!(member_delegation(40), 40);
+				// We claim rewards when unbonding
+				assert_eq!(Currency::free_balance(&40), init_balance_40 + 40);
 
 				// When
 				unsafe_set_state(1, PoolState::Destroying);
@@ -2893,7 +2877,8 @@ mod unbond {
 					PoolMembers::<Runtime>::get(550).unwrap().unbonding_eras,
 					member_unbonding_eras!(3 => 92)
 				);
-				assert_eq!(Currency::free_balance(&550), 550 + 550);
+				assert_eq!(member_delegation(550), 550);
+				assert_eq!(Currency::free_balance(&550), init_balance_550 + 550);
 				assert_eq!(
 					pool_events_since_last_call(),
 					vec![
@@ -2934,7 +2919,8 @@ mod unbond {
 				);
 				assert_eq!(StakingMock::active_stake(&default_bonded_account()).unwrap(), 0);
 
-				assert_eq!(Currency::free_balance(&550), 550 + 550 + 92);
+				// 550 is removed from pool.
+				assert_eq!(member_delegation(550), 0);
 				assert_eq!(
 					pool_events_since_last_call(),
 					vec![
@@ -3532,7 +3518,7 @@ mod pool_withdraw_unbonded {
 
 			assert_eq!(StakingMock::active_stake(&default_bonded_account()), Ok(15));
 			assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(20));
-			assert_eq!(Balances::free_balance(&default_bonded_account()), 20);
+			assert_eq!(pool_balance(1), 20);
 
 			// When
 			CurrentEra::set(StakingMock::current_era() + StakingMock::bonding_duration() + 1);
@@ -3541,7 +3527,7 @@ mod pool_withdraw_unbonded {
 			// Then their unbonding balance is no longer locked
 			assert_eq!(StakingMock::active_stake(&default_bonded_account()), Ok(15));
 			assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(15));
-			assert_eq!(Balances::free_balance(&default_bonded_account()), 20);
+			assert_eq!(pool_balance(1), 20);
 		});
 	}
 	#[test]
@@ -3552,7 +3538,7 @@ mod pool_withdraw_unbonded {
 
 			assert_eq!(StakingMock::active_stake(&default_bonded_account()), Ok(15));
 			assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(20));
-			assert_eq!(Balances::free_balance(&default_bonded_account()), 20);
+			assert_eq!(pool_balance(1), 20);
 			assert_eq!(TotalValueLocked::<T>::get(), 20);
 
 			// When
@@ -3568,14 +3554,14 @@ mod pool_withdraw_unbonded {
 			// Then their unbonding balance is no longer locked
 			assert_eq!(StakingMock::active_stake(&default_bonded_account()), Ok(15));
 			assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(15));
-			assert_eq!(Currency::free_balance(&default_bonded_account()), 20);
+			assert_eq!(pool_balance(1), 20);
 
 			// The difference between TVL and member_balance is exactly the difference between
-			// `total_stake` and the `free_balance`.
-			// This relation is not guaranteed in the wild as arbitrary transfers towards
-			// `free_balance` can be made to the pool that are not accounted for.
-			let non_locked_balance = Balances::free_balance(&default_bonded_account()) -
-				StakingMock::total_stake(&default_bonded_account()).unwrap();
+			// `pool balance` (sum of all balance delegated to pool) and the `staked balance`.
+			// These are the withdrawn funds from the pool stake that have not yet been
+			// claimed by the respective members.
+			let non_locked_balance =
+				pool_balance(1) - StakingMock::total_stake(&default_bonded_account()).unwrap();
 			assert_eq!(member_balance, TotalValueLocked::<T>::get() + non_locked_balance);
 		});
 	}
@@ -3597,7 +3583,7 @@ mod withdraw_unbonded {
 				assert_eq!(StakingMock::bonding_duration(), 3);
 				assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(550), 550));
 				assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(40), 40));
-				assert_eq!(Currency::free_balance(&default_bonded_account()), 600);
+				assert_eq!(pool_balance(1), 600);
 
 				let mut current_era = 1;
 				CurrentEra::set(current_era);
@@ -3626,10 +3612,7 @@ mod withdraw_unbonded {
 						.1 /= 2;
 					UnbondingBalanceMap::set(&x);
 
-					Currency::set_balance(
-						&default_bonded_account(),
-						Currency::free_balance(&default_bonded_account()) / 2, // 300
-					);
+					set_pool_balance(1, pool_balance(1) / 2);
 					assert_eq!(StakingMock::active_stake(&default_bonded_account()).unwrap(), 10);
 					StakingMock::slash_by(1, 5);
 					assert_eq!(StakingMock::active_stake(&default_bonded_account()).unwrap(), 5);
@@ -3671,11 +3654,6 @@ mod withdraw_unbonded {
 						Event::PoolSlashed { pool_id: 1, balance: 5 }
 					]
 				);
-				assert_eq!(
-					balances_events_since_last_call(),
-					vec![BEvent::Burned { who: default_bonded_account(), amount: 300 }]
-				);
-
 				// When
 				assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(550), 550, 0));
 
@@ -3691,10 +3669,9 @@ mod withdraw_unbonded {
 						Event::MemberRemoved { pool_id: 1, member: 550, released_balance: 0 }
 					]
 				);
-				assert_eq!(
-					balances_events_since_last_call(),
-					vec![BEvent::Transfer { from: default_bonded_account(), to: 550, amount: 275 }]
-				);
+
+				// member has 40 tokens in delegation, but only 20 can be withdrawn.
+				assert_eq!(member_delegation(40), 40);
 
 				// When
 				assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(40), 40, 0));
@@ -3708,18 +3685,18 @@ mod withdraw_unbonded {
 				assert_eq!(
 					pool_events_since_last_call(),
 					vec![
+						// out of 40, 20 is withdrawn.
 						Event::Withdrawn { member: 40, pool_id: 1, balance: 20, points: 40 },
-						Event::MemberRemoved { pool_id: 1, member: 40, released_balance: 0 }
+						// member is removed and the dangling delegation of 20 tokens left in their
+						// account is released.
+						Event::MemberRemoved { pool_id: 1, member: 40, released_balance: 20 }
 					]
 				);
-				assert_eq!(
-					balances_events_since_last_call(),
-					vec![BEvent::Transfer { from: default_bonded_account(), to: 40, amount: 20 }]
-				);
 
 				// now, finally, the depositor can take out its share.
 				unsafe_set_state(1, PoolState::Destroying);
 				assert_ok!(fully_unbond_permissioned(10));
+				assert_eq!(member_delegation(10), 10);
 
 				current_era += 3;
 				CurrentEra::set(current_era);
@@ -3731,7 +3708,9 @@ mod withdraw_unbonded {
 					vec![
 						Event::Unbonded { member: 10, pool_id: 1, balance: 5, points: 5, era: 9 },
 						Event::Withdrawn { member: 10, pool_id: 1, balance: 5, points: 5 },
-						Event::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 },
+						// when member is removed, any leftover delegation is released.
+						Event::MemberRemoved { pool_id: 1, member: 10, released_balance: 5 },
+						// when the last member leaves, the pool is destroyed.
 						Event::Destroyed { pool_id: 1 }
 					]
 				);
@@ -3739,7 +3718,6 @@ mod withdraw_unbonded {
 				assert_eq!(
 					balances_events_since_last_call(),
 					vec![
-						BEvent::Transfer { from: default_bonded_account(), to: 10, amount: 5 },
 						BEvent::Thawed { who: default_reward_account(), amount: 5 },
 						BEvent::Transfer { from: default_reward_account(), to: 10, amount: 5 }
 					]
@@ -3753,11 +3731,9 @@ mod withdraw_unbonded {
 			.add_members(vec![(40, 40), (550, 550)])
 			.build_and_execute(|| {
 				let _ = balances_events_since_last_call();
-
 				// Given
 				// current bond is 600, we slash it all to 300.
 				StakingMock::slash_by(1, 300);
-				Currency::set_balance(&default_bonded_account(), 300);
 				assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(300));
 
 				assert_ok!(fully_unbond_permissioned(40));
@@ -3787,10 +3763,6 @@ mod withdraw_unbonded {
 						}
 					]
 				);
-				assert_eq!(
-					balances_events_since_last_call(),
-					vec![BEvent::Burned { who: default_bonded_account(), amount: 300 },]
-				);
 
 				CurrentEra::set(StakingMock::bonding_duration());
 
@@ -3798,10 +3770,6 @@ mod withdraw_unbonded {
 				assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(40), 40, 0));
 
 				// Then
-				assert_eq!(
-					balances_events_since_last_call(),
-					vec![BEvent::Transfer { from: default_bonded_account(), to: 40, amount: 20 },]
-				);
 				assert_eq!(
 					pool_events_since_last_call(),
 					vec![
@@ -3819,10 +3787,6 @@ mod withdraw_unbonded {
 				assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(550), 550, 0));
 
 				// Then
-				assert_eq!(
-					balances_events_since_last_call(),
-					vec![BEvent::Transfer { from: default_bonded_account(), to: 550, amount: 275 },]
-				);
 				assert_eq!(
 					pool_events_since_last_call(),
 					vec![
@@ -3852,9 +3816,11 @@ mod withdraw_unbonded {
 				assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0));
 
 				// then
-				assert_eq!(Currency::free_balance(&10), 10 + 35);
-				assert_eq!(Currency::free_balance(&default_bonded_account()), 0);
-
+				assert_eq!(
+					DelegateMock::agent_balance(Agent::from(default_bonded_account())),
+					None
+				);
+				assert_eq!(StakingMock::stake(&default_bonded_account()).unwrap().total, 0);
 				// in this test 10 also gets a fair share of the slash, because the slash was
 				// applied to the bonded account.
 				assert_eq!(
@@ -3870,7 +3836,6 @@ mod withdraw_unbonded {
 				assert_eq!(
 					balances_events_since_last_call(),
 					vec![
-						BEvent::Transfer { from: default_bonded_account(), to: 10, amount: 5 },
 						BEvent::Thawed { who: default_reward_account(), amount: 5 },
 						BEvent::Transfer { from: default_reward_account(), to: 10, amount: 5 }
 					]
@@ -3878,35 +3843,6 @@ mod withdraw_unbonded {
 			});
 	}
 
-	#[test]
-	fn withdraw_unbonded_handles_faulty_sub_pool_accounting() {
-		ExtBuilder::default().build_and_execute(|| {
-			// Given
-			assert_eq!(Currency::minimum_balance(), 5);
-			assert_eq!(Currency::free_balance(&10), 35);
-			assert_eq!(Currency::free_balance(&default_bonded_account()), 10);
-			unsafe_set_state(1, PoolState::Destroying);
-			assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(10), 10));
-
-			// Simulate a slash that is not accounted for in the sub pools.
-			Currency::set_balance(&default_bonded_account(), 5);
-			assert_eq!(
-				SubPoolsStorage::<Runtime>::get(1).unwrap().with_era,
-				//------------------------------balance decrease is not account for
-				unbonding_pools_with_era! { 3 => UnbondPool { points: 10, balance: 10 } }
-			);
-
-			CurrentEra::set(3);
-
-			// When
-			assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0));
-
-			// Then
-			assert_eq!(Currency::free_balance(&10), 10 + 35);
-			assert_eq!(Currency::free_balance(&default_bonded_account()), 0);
-		});
-	}
-
 	#[test]
 	fn withdraw_unbonded_errors_correctly() {
 		ExtBuilder::default().with_check(0).build_and_execute(|| {
@@ -3925,6 +3861,10 @@ mod withdraw_unbonded {
 			let mut member = PoolMember { pool_id: 1, points: 10, ..Default::default() };
 			PoolMembers::<Runtime>::insert(11, member.clone());
 
+			// set agent and delegator balance
+			DelegateMock::set_agent_balance(Pools::generate_bonded_account(1), 10);
+			DelegateMock::set_delegator_balance(11, 10);
+
 			// Simulate calling `unbond`
 			member.unbonding_eras = member_unbonding_eras!(3 => 10);
 			PoolMembers::<Runtime>::insert(11, member.clone());
@@ -4045,7 +3985,7 @@ mod withdraw_unbonded {
 				}
 			);
 			CurrentEra::set(StakingMock::bonding_duration());
-			assert_eq!(Currency::free_balance(&100), 100);
+			assert_eq!(member_delegation(100), 100);
 
 			// Cannot permissionlessly withdraw
 			assert_noop!(
@@ -4061,6 +4001,7 @@ mod withdraw_unbonded {
 
 			assert_eq!(SubPoolsStorage::<Runtime>::get(1).unwrap(), Default::default(),);
 			assert_eq!(Currency::free_balance(&100), 100 + 100);
+			assert_eq!(member_delegation(100), 0);
 			assert!(!PoolMembers::<Runtime>::contains_key(100));
 			assert_eq!(
 				pool_events_since_last_call(),
@@ -4662,10 +4603,6 @@ mod withdraw_unbonded {
 
 			// move to era when unbonded funds can be withdrawn.
 			CurrentEra::set(4);
-
-			// increment consumer by 1 reproducing the erroneous consumer bug.
-			// refer https://github.com/paritytech/polkadot-sdk/issues/4440.
-			assert_ok!(frame_system::Pallet::<T>::inc_consumers(&pool_one));
 			assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0));
 
 			assert_eq!(
@@ -4712,7 +4649,7 @@ mod create {
 			));
 			assert_eq!(TotalValueLocked::<T>::get(), 10 + StakingMock::minimum_nominator_bond());
 
-			assert_eq!(Currency::free_balance(&11), 0);
+			assert_eq!(member_delegation(11), StakingMock::minimum_nominator_bond());
 			assert_eq!(
 				PoolMembers::<Runtime>::get(11).unwrap(),
 				PoolMember {
@@ -4851,7 +4788,7 @@ mod create {
 				789
 			));
 
-			assert_eq!(Currency::free_balance(&11), 0);
+			assert_eq!(member_delegation(11), StakingMock::minimum_nominator_bond());
 			// delete the initial pool created, then pool_Id `1` will be free
 
 			assert_noop!(
@@ -5014,16 +4951,9 @@ mod set_state {
 			// surpassed. Making this pool destroyable by anyone.
 			StakingMock::slash_by(1, 10);
 
-			// in mock we are using transfer stake which implies slash is greedy. Extrinsic to
-			// apply pending slash should fail.
-			assert_noop!(
-				Pools::apply_slash(RuntimeOrigin::signed(11), 10),
-				Error::<Runtime>::NotSupported
-			);
-
-			// pending slash api should return zero as well.
-			assert_eq!(Pools::api_pool_pending_slash(1), 0);
-			assert_eq!(Pools::api_member_pending_slash(10), 0);
+			// pending slash is correct.
+			assert_eq!(Pools::api_pool_pending_slash(1), 10);
+			assert_eq!(Pools::api_member_pending_slash(10), 10);
 
 			// When
 			assert_ok!(Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Destroying));
@@ -5175,13 +5105,13 @@ mod bond_extra {
 			// given
 			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap().points, 10);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 10);
-			assert_eq!(Currency::free_balance(&10), 100);
+			assert_eq!(member_delegation(10), 10);
 
 			// when
 			assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10)));
 
 			// then
-			assert_eq!(Currency::free_balance(&10), 90);
+			assert_eq!(member_delegation(10), 10 + 10);
 			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap().points, 20);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 20);
 
@@ -5198,7 +5128,7 @@ mod bond_extra {
 			assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(20)));
 
 			// then
-			assert_eq!(Currency::free_balance(&10), 70);
+			assert_eq!(member_delegation(10), 20 + 20);
 			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap().points, 40);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 40);
 
@@ -5221,13 +5151,15 @@ mod bond_extra {
 			// given
 			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap().points, 10);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 10);
-			assert_eq!(Currency::free_balance(&10), 35);
+			// 10 has delegated 10 tokens to the pool.
+			assert_eq!(member_delegation(10), 10);
 
 			// when
 			assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::Rewards));
 
 			// then
-			assert_eq!(Currency::free_balance(&10), 35);
+			// delegator balance is increased by the claimable reward.
+			assert_eq!(member_delegation(10), 10 + claimable_reward);
 			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap().points, 10 + claimable_reward);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 10 + claimable_reward);
 
@@ -5264,8 +5196,8 @@ mod bond_extra {
 			assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 20);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 30);
 
-			assert_eq!(Currency::free_balance(&10), 35);
-			assert_eq!(Currency::free_balance(&20), 20);
+			assert_eq!(member_delegation(10), 10);
+			assert_eq!(member_delegation(20), 20);
 			assert_eq!(TotalValueLocked::<T>::get(), 30);
 
 			// when
@@ -5273,7 +5205,7 @@ mod bond_extra {
 			assert_eq!(Currency::free_balance(&default_reward_account()), 7);
 
 			// then
-			assert_eq!(Currency::free_balance(&10), 35);
+			assert_eq!(member_delegation(10), 10 + 1);
 			assert_eq!(TotalValueLocked::<T>::get(), 31);
 
 			// 10's share of the reward is 1/3, since they gave 10/30 of the total shares.
@@ -5284,11 +5216,11 @@ mod bond_extra {
 			assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::Rewards));
 
 			// then
-			assert_eq!(Currency::free_balance(&20), 20);
 			assert_eq!(TotalValueLocked::<T>::get(), 33);
 
 			// 20's share of the rewards is the other 2/3 of the rewards, since they have 20/30 of
 			// the shares
+			assert_eq!(member_delegation(20), 20 + 2);
 			assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 20 + 2);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 30 + 3);
 
@@ -5320,8 +5252,8 @@ mod bond_extra {
 			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap().points, 10);
 			assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 20);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 30);
-			assert_eq!(Currency::free_balance(&10), 35);
-			assert_eq!(Currency::free_balance(&20), 20);
+			assert_eq!(member_delegation(10), 10);
+			assert_eq!(member_delegation(20), 20);
 
 			// Permissioned by default
 			assert_noop!(
@@ -5337,7 +5269,7 @@ mod bond_extra {
 			assert_eq!(Currency::free_balance(&default_reward_account()), 7);
 
 			// then
-			assert_eq!(Currency::free_balance(&10), 35);
+			assert_eq!(member_delegation(10), 10 + 1);
 			assert_eq!(PoolMembers::<Runtime>::get(10).unwrap().points, 10 + 1);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 30 + 1);
 
@@ -5355,7 +5287,7 @@ mod bond_extra {
 			));
 
 			// then
-			assert_eq!(Currency::free_balance(&20), 12);
+			assert_eq!(member_delegation(20), 20 + 10);
 			assert_eq!(Currency::free_balance(&default_reward_account()), 5);
 			assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 30);
 			assert_eq!(BondedPools::<Runtime>::get(1).unwrap().points, 41);
@@ -7487,63 +7419,3 @@ mod chill {
 		})
 	}
 }
-
-// the test mock is using `TransferStake` and so `DelegateStake` is not tested here. Extrinsics
-// meant for `DelegateStake` should be gated.
-//
-// `DelegateStake` tests are in `pallet-nomination-pools-test-delegate-stake`. Since we support both
-// strategies currently, we keep these tests as it is but in future we may remove `TransferStake`
-// completely.
-mod delegate_stake {
-	use super::*;
-	#[test]
-	fn delegation_specific_calls_are_gated() {
-		ExtBuilder::default().with_check(0).build_and_execute(|| {
-			// Given
-			Currency::set_balance(&11, ExistentialDeposit::get() + 2);
-			assert!(!PoolMembers::<Runtime>::contains_key(11));
-
-			// When
-			assert_ok!(Pools::join(RuntimeOrigin::signed(11), 2, 1));
-
-			// Then
-			assert_eq!(
-				pool_events_since_last_call(),
-				vec![
-					Event::Created { depositor: 10, pool_id: 1 },
-					Event::Bonded { member: 10, pool_id: 1, bonded: 10, joined: true },
-					Event::Bonded { member: 11, pool_id: 1, bonded: 2, joined: true },
-				]
-			);
-
-			assert_eq!(
-				PoolMembers::<Runtime>::get(11).unwrap(),
-				PoolMember::<Runtime> { pool_id: 1, points: 2, ..Default::default() }
-			);
-
-			// ensure pool 1 cannot be migrated.
-			assert!(!Pools::api_pool_needs_delegate_migration(1));
-			assert_noop!(
-				Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1),
-				Error::<Runtime>::NotSupported
-			);
-
-			// members cannot be migrated either.
-			assert!(!Pools::api_member_needs_delegate_migration(10));
-			assert_noop!(
-				Pools::migrate_delegation(RuntimeOrigin::signed(10), 11),
-				Error::<Runtime>::NotSupported
-			);
-
-			// Given
-			// The bonded balance is slashed in half
-			StakingMock::slash_by(1, 6);
-
-			// since slash is greedy with `TransferStake`, `apply_slash` should not work either.
-			assert_noop!(
-				Pools::apply_slash(RuntimeOrigin::signed(10), 11),
-				Error::<Runtime>::NotSupported
-			);
-		});
-	}
-}
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs
index cc6335959ab..54783332aa3 100644
--- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs
+++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs
@@ -21,7 +21,10 @@ mod mock;
 
 use frame_support::{
 	assert_noop, assert_ok, hypothetically,
-	traits::{fungible::InspectHold, Currency},
+	traits::{
+		fungible::{InspectHold, Mutate},
+		Currency,
+	},
 };
 use mock::*;
 use pallet_nomination_pools::{
@@ -942,9 +945,13 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() {
 fn pool_migration_e2e() {
 	new_test_ext().execute_with(|| {
 		LegacyAdapter::set(true);
-		assert_eq!(Balances::minimum_balance(), 5);
 		assert_eq!(CurrentEra::<T>::get(), None);
 
+		// hack: mint ED to pool so that the deprecated `TransferStake` works correctly with
+		// staking.
+		assert_eq!(Balances::minimum_balance(), 5);
+		assert_ok!(Balances::mint_into(&POOL1_BONDED, 5));
+
 		// create the pool with TransferStake strategy.
 		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
 		assert_eq!(LastPoolId::<Runtime>::get(), 1);
@@ -1050,10 +1057,11 @@ fn pool_migration_e2e() {
 
 		assert_eq!(
 			delegated_staking_events_since_last_call(),
+			// delegated also contains the extra ED that we minted when the pool was `TransferStake`.
 			vec![DelegatedStakingEvent::Delegated {
 				agent: POOL1_BONDED,
 				delegator: proxy_delegator_1,
-				amount: 50 + 10 * 3
+				amount: 50 + 10 * 3 + 5
 			}]
 		);
 
@@ -1223,6 +1231,11 @@ fn disable_pool_operations_on_non_migrated() {
 		assert_eq!(Balances::minimum_balance(), 5);
 		assert_eq!(CurrentEra::<T>::get(), None);
 
+		// hack: mint ED to pool so that the deprecated `TransferStake` works correctly with
+		// staking.
+		assert_eq!(Balances::minimum_balance(), 5);
+		assert_ok!(Balances::mint_into(&POOL1_BONDED, 5));
+
 		// create the pool with TransferStake strategy.
 		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
 		assert_eq!(LastPoolId::<Runtime>::get(), 1);
@@ -1331,11 +1344,12 @@ fn disable_pool_operations_on_non_migrated() {
 		assert_ok!(Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1));
 		assert_eq!(
 			delegated_staking_events_since_last_call(),
+			// delegated also contains the extra ED that we minted when the pool was `TransferStake`.
 			vec![DelegatedStakingEvent::Delegated {
 				agent: POOL1_BONDED,
 				delegator: DelegatedStaking::generate_proxy_delegator(Agent::from(POOL1_BONDED))
 					.get(),
-				amount: 50 + 10
+				amount: 50 + 10 + 5
 			},]
 		);
 
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
index d1bc4ef8ff2..d943ba6f533 100644
--- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
+++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
@@ -15,6 +15,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// Disable warnings for `TransferStake` being deprecated.
+#![allow(deprecated)]
+
 use frame_election_provider_support::VoteWeight;
 use frame_support::{
 	assert_ok, derive_impl,
@@ -92,6 +95,7 @@ parameter_types! {
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Runtime {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type UnixTime = pallet_timestamp::Pallet<Self>;
 	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
diff --git a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml b/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
deleted file mode 100644
index 0b21d5f4e8c..00000000000
--- a/substrate/frame/nomination-pools/test-transfer-stake/Cargo.toml
+++ /dev/null
@@ -1,39 +0,0 @@
-[package]
-name = "pallet-nomination-pools-test-transfer-stake"
-version = "1.0.0"
-authors.workspace = true
-edition.workspace = true
-license = "Apache-2.0"
-homepage.workspace = true
-repository.workspace = true
-description = "FRAME nomination pools pallet tests with the staking pallet"
-publish = false
-
-[lints]
-workspace = true
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
-
-[dev-dependencies]
-codec = { features = ["derive"], workspace = true, default-features = true }
-scale-info = { features = ["derive"], workspace = true, default-features = true }
-
-sp-core = { workspace = true, default-features = true }
-sp-io = { workspace = true, default-features = true }
-sp-runtime = { workspace = true, default-features = true }
-sp-staking = { workspace = true, default-features = true }
-
-frame-election-provider-support = { workspace = true, default-features = true }
-frame-support = { workspace = true, default-features = true }
-frame-system = { workspace = true, default-features = true }
-
-pallet-bags-list = { workspace = true, default-features = true }
-pallet-balances = { workspace = true, default-features = true }
-pallet-nomination-pools = { workspace = true, default-features = true }
-pallet-staking = { workspace = true, default-features = true }
-pallet-staking-reward-curve = { workspace = true, default-features = true }
-pallet-timestamp = { workspace = true, default-features = true }
-
-log = { workspace = true, default-features = true }
-sp-tracing = { workspace = true, default-features = true }
diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs
deleted file mode 100644
index cc39cfee91c..00000000000
--- a/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs
+++ /dev/null
@@ -1,912 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#![cfg(test)]
-
-mod mock;
-
-use frame_support::{assert_noop, assert_ok, traits::Currency};
-use mock::*;
-use pallet_nomination_pools::{
-	BondExtra, BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember,
-	PoolMembers, PoolState,
-};
-use pallet_staking::{
-	CurrentEra, Error as StakingError, Event as StakingEvent, Payee, RewardDestination,
-};
-use sp_runtime::{bounded_btree_map, traits::Zero};
-
-#[test]
-fn pool_lifecycle_e2e() {
-	new_test_ext().execute_with(|| {
-		assert_eq!(Balances::minimum_balance(), 5);
-		assert_eq!(CurrentEra::<T>::get(), None);
-
-		// create the pool, we know this has id 1.
-		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
-		assert_eq!(LastPoolId::<Runtime>::get(), 1);
-
-		// have the pool nominate.
-		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Created { depositor: 10, pool_id: 1 },
-				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
-			]
-		);
-
-		// have two members join
-		assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1));
-		assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
-			]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
-				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true },
-			]
-		);
-
-		// pool goes into destroying
-		assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying));
-
-		// depositor cannot unbond yet.
-		assert_noop!(
-			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
-			PoolsError::<Runtime>::MinimumBondNotMet,
-		);
-
-		// now the members want to unbond.
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
-
-		assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().unbonding_eras.len(), 1);
-		assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 0);
-		assert_eq!(PoolMembers::<Runtime>::get(21).unwrap().unbonding_eras.len(), 1);
-		assert_eq!(PoolMembers::<Runtime>::get(21).unwrap().points, 0);
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
-			]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },
-				PoolsEvent::Unbonded { member: 20, pool_id: 1, points: 10, balance: 10, era: 3 },
-				PoolsEvent::Unbonded { member: 21, pool_id: 1, points: 10, balance: 10, era: 3 },
-			]
-		);
-
-		// depositor cannot still unbond
-		assert_noop!(
-			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
-			PoolsError::<Runtime>::MinimumBondNotMet,
-		);
-
-		for e in 1..BondingDuration::get() {
-			CurrentEra::<Runtime>::set(Some(e));
-			assert_noop!(
-				Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0),
-				PoolsError::<Runtime>::CannotWithdrawAny
-			);
-		}
-
-		// members are now unlocked.
-		CurrentEra::<Runtime>::set(Some(BondingDuration::get()));
-
-		// depositor cannot still unbond
-		assert_noop!(
-			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
-			PoolsError::<Runtime>::MinimumBondNotMet,
-		);
-
-		// but members can now withdraw.
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
-		assert!(PoolMembers::<Runtime>::get(20).is_none());
-		assert!(PoolMembers::<Runtime>::get(21).is_none());
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Withdrawn { member: 20, pool_id: 1, points: 10, balance: 10 },
-				PoolsEvent::MemberRemoved { pool_id: 1, member: 20, released_balance: 0 },
-				PoolsEvent::Withdrawn { member: 21, pool_id: 1, points: 10, balance: 10 },
-				PoolsEvent::MemberRemoved { pool_id: 1, member: 21, released_balance: 0 },
-			]
-		);
-
-		// as soon as all members have left, the depositor can try to unbond, but since the
-		// min-nominator intention is set, they must chill first.
-		assert_noop!(
-			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
-			pallet_staking::Error::<Runtime>::InsufficientBond
-		);
-
-		assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 50));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Chilled { stash: POOL1_BONDED },
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 },
-			]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 50, balance: 50, era: 6 }]
-		);
-
-		// waiting another bonding duration:
-		CurrentEra::<Runtime>::set(Some(BondingDuration::get() * 2));
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 1));
-
-		// pools is fully destroyed now.
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 },
-				PoolsEvent::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 },
-				PoolsEvent::Destroyed { pool_id: 1 }
-			]
-		);
-	})
-}
-
-#[test]
-fn destroy_pool_with_erroneous_consumer() {
-	new_test_ext().execute_with(|| {
-		// create the pool, we know this has id 1.
-		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
-		assert_eq!(LastPoolId::<Runtime>::get(), 1);
-
-		// expect consumers on pool account to be 2 (staking lock and an explicit inc by staking).
-		assert_eq!(frame_system::Pallet::<T>::consumers(&POOL1_BONDED), 2);
-
-		// increment consumer by 1 reproducing the erroneous consumer bug.
-		// refer https://github.com/paritytech/polkadot-sdk/issues/4440.
-		assert_ok!(frame_system::Pallet::<T>::inc_consumers(&POOL1_BONDED));
-		assert_eq!(frame_system::Pallet::<T>::consumers(&POOL1_BONDED), 3);
-
-		// have the pool nominate.
-		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Created { depositor: 10, pool_id: 1 },
-				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
-			]
-		);
-
-		// pool goes into destroying
-		assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying));
-
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },]
-		);
-
-		// move to era 1
-		CurrentEra::<Runtime>::set(Some(1));
-
-		// depositor need to chill before unbonding
-		assert_noop!(
-			Pools::unbond(RuntimeOrigin::signed(10), 10, 50),
-			pallet_staking::Error::<Runtime>::InsufficientBond
-		);
-
-		assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 50));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Chilled { stash: POOL1_BONDED },
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 },
-			]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Unbonded {
-				member: 10,
-				pool_id: 1,
-				points: 50,
-				balance: 50,
-				era: 1 + 3
-			}]
-		);
-
-		// waiting bonding duration:
-		CurrentEra::<Runtime>::set(Some(1 + 3));
-		// this should work even with an extra consumer count on pool account.
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 1));
-
-		// pools is fully destroyed now.
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 },
-				PoolsEvent::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 },
-				PoolsEvent::Destroyed { pool_id: 1 }
-			]
-		);
-	})
-}
-
-#[test]
-fn pool_chill_e2e() {
-	new_test_ext().execute_with(|| {
-		assert_eq!(Balances::minimum_balance(), 5);
-		assert_eq!(CurrentEra::<T>::get(), None);
-
-		// create the pool, we know this has id 1.
-		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10));
-		assert_eq!(LastPoolId::<Runtime>::get(), 1);
-
-		// have the pool nominate.
-		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Created { depositor: 10, pool_id: 1 },
-				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true },
-			]
-		);
-
-		// have two members join
-		assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1));
-		assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },
-			]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },
-				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 10, joined: true },
-			]
-		);
-
-		// in case depositor does not have more than `MinNominatorBond` staked, we can end up in
-		// situation where a member unbonding would cause pool balance to drop below
-		// `MinNominatorBond` and hence not allowed. This can happen if the `MinNominatorBond` is
-		// increased after the pool is created.
-		assert_ok!(Staking::set_staking_configs(
-			RuntimeOrigin::root(),
-			pallet_staking::ConfigOp::Set(55), // minimum nominator bond
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-		));
-
-		// members can unbond as long as total stake of the pool is above min nominator bond
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10),);
-		assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().unbonding_eras.len(), 1);
-		assert_eq!(PoolMembers::<Runtime>::get(20).unwrap().points, 0);
-
-		// this member cannot unbond since it will cause `pool stake < MinNominatorBond`
-		assert_noop!(
-			Pools::unbond(RuntimeOrigin::signed(21), 21, 10),
-			StakingError::<Runtime>::InsufficientBond,
-		);
-
-		// members can call `chill` permissionlessly now
-		assert_ok!(Pools::chill(RuntimeOrigin::signed(20), 1));
-
-		// now another member can unbond.
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
-		assert_eq!(PoolMembers::<Runtime>::get(21).unwrap().unbonding_eras.len(), 1);
-		assert_eq!(PoolMembers::<Runtime>::get(21).unwrap().points, 0);
-
-		// nominator can not resume nomination until depositor have enough stake
-		assert_noop!(
-			Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
-			PoolsError::<Runtime>::MinimumBondNotMet,
-		);
-
-		// other members joining pool does not affect the depositor's ability to resume nomination
-		assert_ok!(Pools::join(RuntimeOrigin::signed(22), 10, 1));
-
-		assert_noop!(
-			Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]),
-			PoolsError::<Runtime>::MinimumBondNotMet,
-		);
-
-		// depositor can bond extra stake
-		assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10)));
-
-		// `chill` can not be called permissionlessly anymore
-		assert_noop!(
-			Pools::chill(RuntimeOrigin::signed(20), 1),
-			PoolsError::<Runtime>::NotNominator,
-		);
-
-		// now nominator can resume nomination
-		assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]));
-
-		// skip to make the unbonding period end.
-		CurrentEra::<Runtime>::set(Some(BondingDuration::get()));
-
-		// members can now withdraw.
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
-				StakingEvent::Chilled { stash: POOL1_BONDED },
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // other member bonding
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, // depositor bond extra
-				StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },
-			]
-		);
-	})
-}
-
-#[test]
-fn pool_slash_e2e() {
-	new_test_ext().execute_with(|| {
-		ExistentialDeposit::set(1);
-		assert_eq!(Balances::minimum_balance(), 1);
-		assert_eq!(CurrentEra::<T>::get(), None);
-
-		// create the pool, we know this has id 1.
-		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
-		assert_eq!(LastPoolId::<Runtime>::get(), 1);
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Created { depositor: 10, pool_id: 1 },
-				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
-			]
-		);
-
-		assert_eq!(
-			Payee::<Runtime>::get(POOL1_BONDED),
-			Some(RewardDestination::Account(POOL1_REWARD))
-		);
-
-		// have two members join
-		assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1));
-		assert_ok!(Pools::join(RuntimeOrigin::signed(21), 20, 1));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 },
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 }
-			]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 20, joined: true },
-				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: 20, joined: true },
-			]
-		);
-
-		// now let's progress a bit.
-		CurrentEra::<Runtime>::set(Some(1));
-
-		// 20 / 80 of the total funds are unlocked, and safe from any further slash.
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }
-			]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 4 },
-				PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 4 }
-			]
-		);
-
-		CurrentEra::<Runtime>::set(Some(2));
-
-		// note: depositor cannot fully unbond at this point.
-		// these funds will still get slashed.
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
-				StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 },
-			]
-		);
-
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 5 },
-				PoolsEvent::Unbonded { member: 20, pool_id: 1, balance: 10, points: 10, era: 5 },
-				PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 10, points: 10, era: 5 },
-			]
-		);
-
-		// At this point, 20 are safe from slash, 30 are unlocking but vulnerable to slash, and
-		// another 30 are active and vulnerable to slash. Let's slash half of them.
-		pallet_staking::slashing::do_slash::<Runtime>(
-			&POOL1_BONDED,
-			30,
-			&mut Default::default(),
-			&mut Default::default(),
-			2, // slash era 2, affects chunks at era 5 onwards.
-		);
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				// 30 has been slashed to 15 (15 slash)
-				PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 5, balance: 15 },
-				// 30 has been slashed to 15 (15 slash)
-				PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 }
-			]
-		);
-
-		CurrentEra::<Runtime>::set(Some(3));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10));
-
-		assert_eq!(
-			PoolMembers::<Runtime>::get(21).unwrap(),
-			PoolMember {
-				pool_id: 1,
-				points: 0,
-				last_recorded_reward_counter: Zero::zero(),
-				// the 10 points unlocked just now correspond to 5 points in the unbond pool.
-				unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5)
-			}
-		);
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }]
-		);
-
-		// now we start withdrawing. we do it all at once, at era 6 where 20 and 21 are fully free.
-		CurrentEra::<Runtime>::set(Some(6));
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0));
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0));
-
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				// 20 had unbonded 10 safely, and 10 got slashed by half.
-				PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 10 + 5, points: 20 },
-				PoolsEvent::MemberRemoved { pool_id: 1, member: 20, released_balance: 0 },
-				// 21 unbonded all of it after the slash
-				PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 5 + 5, points: 15 },
-				PoolsEvent::MemberRemoved { pool_id: 1, member: 21, released_balance: 0 }
-			]
-		);
-		assert_eq!(
-			staking_events_since_last_call(),
-			// a 10 (un-slashed) + 10/2 (slashed) balance from 10 has also been unlocked
-			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 15 + 10 + 15 }]
-		);
-
-		// now, finally, we can unbond the depositor further than their current limit.
-		assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 20));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying },
-				PoolsEvent::Unbonded { member: 10, pool_id: 1, points: 10, balance: 10, era: 9 }
-			]
-		);
-
-		CurrentEra::<Runtime>::set(Some(9));
-		assert_eq!(
-			PoolMembers::<Runtime>::get(10).unwrap(),
-			PoolMember {
-				pool_id: 1,
-				points: 0,
-				last_recorded_reward_counter: Zero::zero(),
-				unbonding_eras: bounded_btree_map!(4 => 10, 5 => 10, 9 => 10)
-			}
-		);
-		// withdraw the depositor, they should lose 12 balance in total due to slash.
-		assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Withdrawn { member: 10, pool_id: 1, balance: 10 + 15, points: 30 },
-				PoolsEvent::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 },
-				PoolsEvent::Destroyed { pool_id: 1 }
-			]
-		);
-	});
-}
-
-#[test]
-fn pool_slash_proportional() {
-	// a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that
-	// happened in era 100 should only affect the latter two.
-	new_test_ext().execute_with(|| {
-		ExistentialDeposit::set(1);
-		BondingDuration::set(28);
-		assert_eq!(Balances::minimum_balance(), 1);
-		assert_eq!(CurrentEra::<T>::get(), None);
-
-		// create the pool, we know this has id 1.
-		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
-		assert_eq!(LastPoolId::<T>::get(), 1);
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Created { depositor: 10, pool_id: 1 },
-				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
-			]
-		);
-
-		// have two members join
-		let bond = 20;
-		assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1));
-		assert_ok!(Pools::join(RuntimeOrigin::signed(21), bond, 1));
-		assert_ok!(Pools::join(RuntimeOrigin::signed(22), bond, 1));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond },
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond },
-				StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond },
-			]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true },
-				PoolsEvent::Bonded { member: 21, pool_id: 1, bonded: bond, joined: true },
-				PoolsEvent::Bonded { member: 22, pool_id: 1, bonded: bond, joined: true },
-			]
-		);
-
-		// now let's progress a lot.
-		CurrentEra::<T>::set(Some(99));
-
-		// and unbond
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond));
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Unbonded {
-				member: 20,
-				pool_id: 1,
-				balance: bond,
-				points: bond,
-				era: 127
-			}]
-		);
-
-		CurrentEra::<T>::set(Some(100));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, bond));
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Unbonded {
-				member: 21,
-				pool_id: 1,
-				balance: bond,
-				points: bond,
-				era: 128
-			}]
-		);
-
-		CurrentEra::<T>::set(Some(101));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, bond));
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Unbonded {
-				member: 22,
-				pool_id: 1,
-				balance: bond,
-				points: bond,
-				era: 129
-			}]
-		);
-
-		// Apply a slash that happened in era 100. This is typically applied with a delay.
-		// Of the total 100, 50 is slashed.
-		assert_eq!(BondedPools::<T>::get(1).unwrap().points, 40);
-		pallet_staking::slashing::do_slash::<Runtime>(
-			&POOL1_BONDED,
-			50,
-			&mut Default::default(),
-			&mut Default::default(),
-			100,
-		);
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				// This era got slashed 12.5, which rounded up to 13.
-				PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 7 },
-				// This era got slashed 12 instead of 12.5 because an earlier chunk got 0.5 more
-				// slashed, and 12 is all the remaining slash
-				PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 129, balance: 8 },
-				// Bonded pool got slashed for 25, remaining 15 in it.
-				PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 }
-			]
-		);
-	});
-}
-
-#[test]
-fn pool_slash_non_proportional_only_bonded_pool() {
-	// A typical example where a pool member unbonds in era 99, and they can get away with a slash
-	// that happened in era 100, as long as the pool has enough active bond to cover the slash. If
-	// everything else in the slashing/staking system works, this should always be the case.
-	// Nonetheless, `ledger.slash` has been written such that it will slash greedily from any chunk
-	// if it runs out of chunks that it thinks should be affected by the slash.
-	new_test_ext().execute_with(|| {
-		ExistentialDeposit::set(1);
-		BondingDuration::set(28);
-		assert_eq!(Balances::minimum_balance(), 1);
-		assert_eq!(CurrentEra::<T>::get(), None);
-
-		// create the pool, we know this has id 1.
-		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Created { depositor: 10, pool_id: 1 },
-				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
-			]
-		);
-
-		// have two members join
-		let bond = 20;
-		assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1));
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
-		);
-
-		// progress and unbond.
-		CurrentEra::<T>::set(Some(99));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond));
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Unbonded {
-				member: 20,
-				pool_id: 1,
-				balance: bond,
-				points: bond,
-				era: 127
-			}]
-		);
-
-		// slash for 30. This will be deducted only from the bonded pool.
-		CurrentEra::<T>::set(Some(100));
-		assert_eq!(BondedPools::<T>::get(1).unwrap().points, 40);
-		pallet_staking::slashing::do_slash::<Runtime>(
-			&POOL1_BONDED,
-			30,
-			&mut Default::default(),
-			&mut Default::default(),
-			100,
-		);
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }]
-		);
-	});
-}
-
-#[test]
-fn pool_slash_non_proportional_bonded_pool_and_chunks() {
-	// An uncommon example where even though some funds are unlocked such that they should not be
-	// affected by a slash, we still slash out of them. This should not happen at all. If a
-	// nomination has unbonded, from the next era onwards, their exposure will drop, so if an era
-	// happens in that era, then their share of that slash should naturally be less, such that only
-	// their active ledger stake is enough to compensate it.
-	new_test_ext().execute_with(|| {
-		ExistentialDeposit::set(1);
-		BondingDuration::set(28);
-		assert_eq!(Balances::minimum_balance(), 1);
-		assert_eq!(CurrentEra::<T>::get(), None);
-
-		// create the pool, we know this has id 1.
-		assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10));
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				PoolsEvent::Created { depositor: 10, pool_id: 1 },
-				PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 40, joined: true },
-			]
-		);
-
-		// have two members join
-		let bond = 20;
-		assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1));
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: bond, joined: true }]
-		);
-
-		// progress and unbond.
-		CurrentEra::<T>::set(Some(99));
-		assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond));
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![PoolsEvent::Unbonded {
-				member: 20,
-				pool_id: 1,
-				balance: bond,
-				points: bond,
-				era: 127
-			}]
-		);
-
-		// slash 50. This will be deducted only from the bonded pool and one of the unbonding pools.
-		CurrentEra::<T>::set(Some(100));
-		assert_eq!(BondedPools::<T>::get(1).unwrap().points, 40);
-		pallet_staking::slashing::do_slash::<Runtime>(
-			&POOL1_BONDED,
-			50,
-			&mut Default::default(),
-			&mut Default::default(),
-			100,
-		);
-
-		assert_eq!(
-			staking_events_since_last_call(),
-			vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }]
-		);
-		assert_eq!(
-			pool_events_since_last_call(),
-			vec![
-				// out of 20, 10 was taken.
-				PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 10 },
-				// out of 40, all was taken.
-				PoolsEvent::PoolSlashed { pool_id: 1, balance: 0 }
-			]
-		);
-	});
-}
diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs
deleted file mode 100644
index d913c5fe694..00000000000
--- a/substrate/frame/nomination-pools/test-transfer-stake/src/mock.rs
+++ /dev/null
@@ -1,231 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use frame_election_provider_support::VoteWeight;
-use frame_support::{
-	assert_ok, derive_impl,
-	pallet_prelude::*,
-	parameter_types,
-	traits::{ConstU64, ConstU8, VariantCountOf},
-	PalletId,
-};
-use sp_runtime::{
-	traits::{Convert, IdentityLookup},
-	BuildStorage, FixedU128, Perbill,
-};
-
-type AccountId = u128;
-type BlockNumber = u64;
-type Balance = u128;
-
-pub(crate) type T = Runtime;
-
-pub(crate) const POOL1_BONDED: AccountId = 20318131474730217858575332831085u128;
-pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128;
-
-#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
-impl frame_system::Config for Runtime {
-	type AccountId = AccountId;
-	type Lookup = IdentityLookup<Self::AccountId>;
-	type Block = Block;
-	type AccountData = pallet_balances::AccountData<Balance>;
-}
-
-impl pallet_timestamp::Config for Runtime {
-	type Moment = u64;
-	type OnTimestampSet = ();
-	type MinimumPeriod = ConstU64<5>;
-	type WeightInfo = ();
-}
-
-parameter_types! {
-	pub static ExistentialDeposit: Balance = 5;
-}
-
-#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
-impl pallet_balances::Config for Runtime {
-	type Balance = Balance;
-	type ExistentialDeposit = ExistentialDeposit;
-	type AccountStore = System;
-	type FreezeIdentifier = RuntimeFreezeReason;
-	type MaxFreezes = VariantCountOf<RuntimeFreezeReason>;
-	type RuntimeFreezeReason = RuntimeFreezeReason;
-}
-
-pallet_staking_reward_curve::build! {
-	const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!(
-		min_inflation: 0_025_000,
-		max_inflation: 0_100_000,
-		ideal_stake: 0_500_000,
-		falloff: 0_050_000,
-		max_piece_count: 40,
-		test_precision: 0_005_000,
-	);
-}
-
-parameter_types! {
-	pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS;
-	pub static BondingDuration: u32 = 3;
-}
-
-#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
-impl pallet_staking::Config for Runtime {
-	type Currency = Balances;
-	type UnixTime = pallet_timestamp::Pallet<Self>;
-	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
-	type BondingDuration = BondingDuration;
-	type EraPayout = pallet_staking::ConvertCurve<RewardCurve>;
-	type ElectionProvider =
-		frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>;
-	type GenesisElectionProvider = Self::ElectionProvider;
-	type VoterList = VoterList;
-	type TargetList = pallet_staking::UseValidatorsMap<Self>;
-	type EventListeners = Pools;
-	type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig;
-}
-
-parameter_types! {
-	pub static BagThresholds: &'static [VoteWeight] = &[10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000];
-}
-
-type VoterBagsListInstance = pallet_bags_list::Instance1;
-impl pallet_bags_list::Config<VoterBagsListInstance> for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type WeightInfo = ();
-	type BagThresholds = BagThresholds;
-	type ScoreProvider = Staking;
-	type Score = VoteWeight;
-}
-
-pub struct BalanceToU256;
-impl Convert<Balance, sp_core::U256> for BalanceToU256 {
-	fn convert(n: Balance) -> sp_core::U256 {
-		n.into()
-	}
-}
-
-pub struct U256ToBalance;
-impl Convert<sp_core::U256, Balance> for U256ToBalance {
-	fn convert(n: sp_core::U256) -> Balance {
-		n.try_into().unwrap()
-	}
-}
-
-parameter_types! {
-	pub const PostUnbondingPoolsWindow: u32 = 10;
-	pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls");
-}
-
-impl pallet_nomination_pools::Config for Runtime {
-	type RuntimeEvent = RuntimeEvent;
-	type WeightInfo = ();
-	type Currency = Balances;
-	type RuntimeFreezeReason = RuntimeFreezeReason;
-	type RewardCounter = FixedU128;
-	type BalanceToU256 = BalanceToU256;
-	type U256ToBalance = U256ToBalance;
-	type StakeAdapter = pallet_nomination_pools::adapter::TransferStake<Self, Staking>;
-	type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow;
-	type MaxMetadataLen = ConstU32<256>;
-	type MaxUnbonding = ConstU32<8>;
-	type MaxPointsToBalance = ConstU8<10>;
-	type PalletId = PoolsPalletId;
-	type AdminOrigin = frame_system::EnsureRoot<Self::AccountId>;
-}
-
-type Block = frame_system::mocking::MockBlock<Runtime>;
-
-frame_support::construct_runtime!(
-	pub enum Runtime {
-		System: frame_system,
-		Timestamp: pallet_timestamp,
-		Balances: pallet_balances,
-		Staking: pallet_staking,
-		VoterList: pallet_bags_list::<Instance1>,
-		Pools: pallet_nomination_pools,
-	}
-);
-
-pub fn new_test_ext() -> sp_io::TestExternalities {
-	sp_tracing::try_init_simple();
-	let mut storage = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
-	let _ = pallet_nomination_pools::GenesisConfig::<Runtime> {
-		min_join_bond: 2,
-		min_create_bond: 2,
-		max_pools: Some(3),
-		max_members_per_pool: Some(5),
-		max_members: Some(3 * 5),
-		global_max_commission: Some(Perbill::from_percent(90)),
-	}
-	.assimilate_storage(&mut storage)
-	.unwrap();
-
-	let _ = pallet_balances::GenesisConfig::<Runtime> {
-		balances: vec![(10, 100), (20, 100), (21, 100), (22, 100)],
-	}
-	.assimilate_storage(&mut storage)
-	.unwrap();
-
-	let mut ext = sp_io::TestExternalities::from(storage);
-
-	ext.execute_with(|| {
-		// for events to be deposited.
-		frame_system::Pallet::<Runtime>::set_block_number(1);
-
-		// set some limit for nominations.
-		assert_ok!(Staking::set_staking_configs(
-			RuntimeOrigin::root(),
-			pallet_staking::ConfigOp::Set(10), // minimum nominator bond
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-			pallet_staking::ConfigOp::Noop,
-		));
-	});
-
-	ext
-}
-
-parameter_types! {
-	static ObservedEventsPools: usize = 0;
-	static ObservedEventsStaking: usize = 0;
-	static ObservedEventsBalances: usize = 0;
-}
-
-pub(crate) fn pool_events_since_last_call() -> Vec<pallet_nomination_pools::Event<Runtime>> {
-	let events = System::events()
-		.into_iter()
-		.map(|r| r.event)
-		.filter_map(|e| if let RuntimeEvent::Pools(inner) = e { Some(inner) } else { None })
-		.collect::<Vec<_>>();
-	let already_seen = ObservedEventsPools::get();
-	ObservedEventsPools::set(events.len());
-	events.into_iter().skip(already_seen).collect()
-}
-
-pub(crate) fn staking_events_since_last_call() -> Vec<pallet_staking::Event<Runtime>> {
-	let events = System::events()
-		.into_iter()
-		.map(|r| r.event)
-		.filter_map(|e| if let RuntimeEvent::Staking(inner) = e { Some(inner) } else { None })
-		.collect::<Vec<_>>();
-	let already_seen = ObservedEventsStaking::get();
-	ObservedEventsStaking::set(events.len());
-	events.into_iter().skip(already_seen).collect()
-}
diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs
index 75f3e9931e3..3d3cd470bc2 100644
--- a/substrate/frame/offences/benchmarking/src/inner.rs
+++ b/substrate/frame/offences/benchmarking/src/inner.rs
@@ -180,16 +180,12 @@ where
 	<T as frame_system::Config>::RuntimeEvent: TryInto<frame_system::Event<T>>,
 {
 	// make sure that all slashes have been applied
-	// (n nominators + one validator) * (slashed + unlocked) + deposit to reporter +
-	// reporter account endowed + some funds rescinded from issuance.
-	assert_eq!(
-		System::<T>::read_events_for_pallet::<pallet_balances::Event<T>>().len(),
-		2 * (offender_count + 1) + 3
-	);
+	// deposit to reporter + reporter account endowed.
+	assert_eq!(System::<T>::read_events_for_pallet::<pallet_balances::Event<T>>().len(), 2);
 	// (n nominators + one validator) * slashed + Slash Reported
 	assert_eq!(
 		System::<T>::read_events_for_pallet::<pallet_staking::Event<T>>().len(),
-		1 * (offender_count + 1) + 1
+		1 * (offender_count + 1) as usize + 1
 	);
 	// offence
 	assert_eq!(System::<T>::read_events_for_pallet::<pallet_offences::Event>().len(), 1);
diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs
index c5c178aa444..3c81f2a664e 100644
--- a/substrate/frame/offences/benchmarking/src/mock.rs
+++ b/substrate/frame/offences/benchmarking/src/mock.rs
@@ -125,6 +125,7 @@ impl onchain::Config for OnChainSeqPhragmen {
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Test {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = <Self as pallet_balances::Config>::Balance;
 	type UnixTime = pallet_timestamp::Pallet<Self>;
diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs
index 7a96b8eade4..3f14dc00b56 100644
--- a/substrate/frame/root-offences/src/mock.rs
+++ b/substrate/frame/root-offences/src/mock.rs
@@ -126,6 +126,7 @@ parameter_types! {
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Test {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = <Self as pallet_balances::Config>::Balance;
 	type UnixTime = Timestamp;
@@ -206,10 +207,10 @@ impl ExtBuilder {
 				(30, self.balance_factor * 50),
 				(40, self.balance_factor * 50),
 				// stashes
-				(11, self.balance_factor * 1000),
-				(21, self.balance_factor * 1000),
-				(31, self.balance_factor * 500),
-				(41, self.balance_factor * 1000),
+				(11, self.balance_factor * 1500),
+				(21, self.balance_factor * 1500),
+				(31, self.balance_factor * 1000),
+				(41, self.balance_factor * 2000),
 			],
 		}
 		.assimilate_storage(&mut storage)
diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs
index 346cd04c0fa..74201da3d2f 100644
--- a/substrate/frame/session/benchmarking/src/mock.rs
+++ b/substrate/frame/session/benchmarking/src/mock.rs
@@ -133,6 +133,7 @@ impl onchain::Config for OnChainSeqPhragmen {
 
 #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)]
 impl pallet_staking::Config for Test {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type CurrencyBalance = <Self as pallet_balances::Config>::Balance;
 	type UnixTime = pallet_timestamp::Pallet<Self>;
diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml
index 22176b6d720..74b1c78e9cb 100644
--- a/substrate/frame/staking/Cargo.toml
+++ b/substrate/frame/staking/Cargo.toml
@@ -41,6 +41,7 @@ rand_chacha = { optional = true, workspace = true }
 [dev-dependencies]
 frame-benchmarking = { workspace = true, default-features = true }
 frame-election-provider-support = { workspace = true, default-features = true }
+frame-support = { features = ["experimental"], workspace = true, default-features = true }
 pallet-bags-list = { workspace = true, default-features = true }
 pallet-balances = { workspace = true, default-features = true }
 pallet-staking-reward-curve = { workspace = true, default-features = true }
diff --git a/substrate/frame/staking/src/asset.rs b/substrate/frame/staking/src/asset.rs
index 23368b1f8fc..a1140d317c2 100644
--- a/substrate/frame/staking/src/asset.rs
+++ b/substrate/frame/staking/src/asset.rs
@@ -18,9 +18,15 @@
 //! Contains all the interactions with [`Config::Currency`] to manipulate the underlying staking
 //! asset.
 
-use frame_support::traits::{Currency, InspectLockableCurrency, LockableCurrency};
-
-use crate::{BalanceOf, Config, NegativeImbalanceOf, PositiveImbalanceOf};
+use crate::{BalanceOf, Config, HoldReason, NegativeImbalanceOf, PositiveImbalanceOf};
+use frame_support::traits::{
+	fungible::{
+		hold::{Balanced as FunHoldBalanced, Inspect as FunHoldInspect, Mutate as FunHoldMutate},
+		Balanced, Inspect as FunInspect,
+	},
+	tokens::{Fortitude, Precision, Preservation},
+};
+use sp_runtime::{DispatchResult, Saturating};
 
 /// Existential deposit for the chain.
 pub fn existential_deposit<T: Config>() -> BalanceOf<T> {
@@ -32,7 +38,7 @@ pub fn total_issuance<T: Config>() -> BalanceOf<T> {
 	T::Currency::total_issuance()
 }
 
-/// Total balance of `who`. Includes both, free and reserved.
+/// Total balance of `who`. Includes both free and staked.
 pub fn total_balance<T: Config>(who: &T::AccountId) -> BalanceOf<T> {
 	T::Currency::total_balance(who)
 }
@@ -41,42 +47,65 @@ pub fn total_balance<T: Config>(who: &T::AccountId) -> BalanceOf<T> {
 ///
 /// This includes balance free to stake along with any balance that is already staked.
 pub fn stakeable_balance<T: Config>(who: &T::AccountId) -> BalanceOf<T> {
-	T::Currency::free_balance(who)
+	free_to_stake::<T>(who).saturating_add(staked::<T>(who))
 }
 
 /// Balance of `who` that is currently at stake.
 ///
-/// The staked amount is locked and cannot be transferred out of `who`s account.
+/// The staked amount is on hold and cannot be transferred out of `who`'s account.
 pub fn staked<T: Config>(who: &T::AccountId) -> BalanceOf<T> {
-	T::Currency::balance_locked(crate::STAKING_ID, who)
+	T::Currency::balance_on_hold(&HoldReason::Staking.into(), who)
+}
+
+/// Balance of `who` that can be staked additionally.
+///
+/// Does not include the current stake.
+pub fn free_to_stake<T: Config>(who: &T::AccountId) -> BalanceOf<T> {
+	// since we want to be able to use frozen funds for staking, we force the reduction.
+	T::Currency::reducible_balance(who, Preservation::Preserve, Fortitude::Force)
 }
 
 /// Set balance that can be staked for `who`.
 ///
-/// This includes any balance that is already staked.
+/// If `value` is lower than the current staked balance, the difference is unlocked.
+///
+/// Should only be used in tests.
 #[cfg(any(test, feature = "runtime-benchmarks"))]
 pub fn set_stakeable_balance<T: Config>(who: &T::AccountId, value: BalanceOf<T>) {
-	T::Currency::make_free_balance_be(who, value);
+	use frame_support::traits::fungible::Mutate;
+
+	// minimum free balance (non-staked) required to keep the account alive.
+	let ed = existential_deposit::<T>();
+	// currently on stake
+	let staked_balance = staked::<T>(who);
+
+	// if new value is greater than staked balance, mint some free balance.
+	if value > staked_balance {
+		let _ = T::Currency::set_balance(who, value - staked_balance + ed);
+	} else {
+		// else reduce the staked balance.
+		update_stake::<T>(who, value).expect("can remove from what is staked");
+		// burn all free, only leaving ED.
+		let _ = T::Currency::set_balance(who, ed);
+	}
+
+	// ensure new stakeable balance same as desired `value`.
+	assert_eq!(stakeable_balance::<T>(who), value);
 }
 
 /// Update `amount` at stake for `who`.
 ///
 /// Overwrites the existing stake amount. If passed amount is lower than the existing stake, the
 /// difference is unlocked.
-pub fn update_stake<T: Config>(who: &T::AccountId, amount: BalanceOf<T>) {
-	T::Currency::set_lock(
-		crate::STAKING_ID,
-		who,
-		amount,
-		frame_support::traits::WithdrawReasons::all(),
-	);
+pub fn update_stake<T: Config>(who: &T::AccountId, amount: BalanceOf<T>) -> DispatchResult {
+	T::Currency::set_on_hold(&HoldReason::Staking.into(), who, amount)
 }
 
-/// Kill the stake of `who`.
+/// Release all staked amount to `who`.
 ///
-/// All locked amount is unlocked.
-pub fn kill_stake<T: Config>(who: &T::AccountId) {
-	T::Currency::remove_lock(crate::STAKING_ID, who);
+/// Fails if there are consumers left on `who` that restrict it from being reaped.
+pub fn kill_stake<T: Config>(who: &T::AccountId) -> DispatchResult {
+	T::Currency::release_all(&HoldReason::Staking.into(), who, Precision::BestEffort).map(|_| ())
 }
 
 /// Slash the value from `who`.
@@ -86,29 +115,32 @@ pub fn slash<T: Config>(
 	who: &T::AccountId,
 	value: BalanceOf<T>,
 ) -> (NegativeImbalanceOf<T>, BalanceOf<T>) {
-	T::Currency::slash(who, value)
+	T::Currency::slash(&HoldReason::Staking.into(), who, value)
 }
 
 /// Mint `value` into an existing account `who`.
 ///
 /// This does not increase the total issuance.
-pub fn mint_existing<T: Config>(
+pub fn mint_into_existing<T: Config>(
 	who: &T::AccountId,
 	value: BalanceOf<T>,
 ) -> Option<PositiveImbalanceOf<T>> {
-	T::Currency::deposit_into_existing(who, value).ok()
+	// since the account already exists, we mint exact value even if value is below ED.
+	T::Currency::deposit(who, value, Precision::Exact).ok()
 }
 
-/// Mint reward and create account for `who` if it does not exist.
+/// Mint `value` and create account for `who` if it does not exist.
 ///
-/// This does not increase the total issuance.
+/// If value is below existential deposit, the account is not created.
+///
+/// Note: This does not increase the total issuance.
 pub fn mint_creating<T: Config>(who: &T::AccountId, value: BalanceOf<T>) -> PositiveImbalanceOf<T> {
-	T::Currency::deposit_creating(who, value)
+	T::Currency::deposit(who, value, Precision::BestEffort).unwrap_or_default()
 }
 
 /// Deposit newly issued or slashed `value` into `who`.
 pub fn deposit_slashed<T: Config>(who: &T::AccountId, value: NegativeImbalanceOf<T>) {
-	T::Currency::resolve_creating(who, value)
+	let _ = T::Currency::resolve(who, value);
 }
 
 /// Issue `value` increasing total issuance.
@@ -121,5 +153,5 @@ pub fn issue<T: Config>(value: BalanceOf<T>) -> NegativeImbalanceOf<T> {
 /// Burn the amount from the total issuance.
 #[cfg(feature = "runtime-benchmarks")]
 pub fn burn<T: Config>(amount: BalanceOf<T>) -> PositiveImbalanceOf<T> {
-	T::Currency::burn(amount)
+	T::Currency::rescind(amount)
 }
diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs
index 79d8dd3fbc3..59d272168d6 100644
--- a/substrate/frame/staking/src/benchmarking.rs
+++ b/substrate/frame/staking/src/benchmarking.rs
@@ -257,7 +257,11 @@ mod benchmarks {
 			.map(|l| l.active)
 			.ok_or("ledger not created after")?;
 
-		let _ = asset::mint_existing::<T>(&stash, max_additional).unwrap();
+		let _ = asset::mint_into_existing::<T>(
+			&stash,
+			max_additional + asset::existential_deposit::<T>(),
+		)
+		.unwrap();
 
 		whitelist_account!(stash);
 
@@ -1133,6 +1137,23 @@ mod benchmarks {
 		Ok(())
 	}
 
+	#[benchmark]
+	fn migrate_currency() -> Result<(), BenchmarkError> {
+		let (stash, _ctrl) =
+			create_stash_controller::<T>(USER_SEED, 100, RewardDestination::Staked)?;
+		let stake = asset::staked::<T>(&stash);
+		migrate_to_old_currency::<T>(stash.clone());
+		// no holds
+		assert!(asset::staked::<T>(&stash).is_zero());
+		whitelist_account!(stash);
+
+		#[extrinsic_call]
+		_(RawOrigin::Signed(stash.clone()), stash.clone());
+
+		assert_eq!(asset::staked::<T>(&stash), stake);
+		Ok(())
+	}
+
 	impl_benchmark_test_suite!(
 		Staking,
 		crate::mock::ExtBuilder::default().has_stakers(true),
diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs
index ac3be04cf60..1d66ebd27e9 100644
--- a/substrate/frame/staking/src/ledger.rs
+++ b/substrate/frame/staking/src/ledger.rs
@@ -32,6 +32,7 @@
 //! state consistency.
 
 use frame_support::{defensive, ensure, traits::Defensive};
+use sp_runtime::DispatchResult;
 use sp_staking::{StakingAccount, StakingInterface};
 
 use crate::{
@@ -187,7 +188,8 @@ impl<T: Config> StakingLedger<T> {
 		// We skip locking virtual stakers.
 		if !Pallet::<T>::is_virtual_staker(&self.stash) {
 			// for direct stakers, update lock on stash based on ledger.
-			asset::update_stake::<T>(&self.stash, self.total);
+			asset::update_stake::<T>(&self.stash, self.total)
+				.map_err(|_| Error::<T>::NotEnoughFunds)?;
 		}
 
 		Ledger::<T>::insert(
@@ -250,7 +252,7 @@ impl<T: Config> StakingLedger<T> {
 
 	/// Clears all data related to a staking ledger and its bond in both [`Ledger`] and [`Bonded`]
 	/// storage items and updates the stash staking lock.
-	pub(crate) fn kill(stash: &T::AccountId) -> Result<(), Error<T>> {
+	pub(crate) fn kill(stash: &T::AccountId) -> DispatchResult {
 		let controller = <Bonded<T>>::get(stash).ok_or(Error::<T>::NotStash)?;
 
 		<Ledger<T>>::get(&controller).ok_or(Error::<T>::NotController).map(|ledger| {
@@ -259,9 +261,9 @@ impl<T: Config> StakingLedger<T> {
 			<Payee<T>>::remove(&stash);
 
 			// kill virtual staker if it exists.
-			if <VirtualStakers<T>>::take(&stash).is_none() {
+			if <VirtualStakers<T>>::take(&ledger.stash).is_none() {
 				// if not virtual staker, clear locks.
-				asset::kill_stake::<T>(&ledger.stash);
+				asset::kill_stake::<T>(&ledger.stash)?;
 			}
 
 			Ok(())
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index 6361663b2b1..42230cb27b7 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -312,7 +312,8 @@ use codec::{Decode, Encode, HasCompact, MaxEncodedLen};
 use frame_support::{
 	defensive, defensive_assert,
 	traits::{
-		ConstU32, Currency, Defensive, DefensiveMax, DefensiveSaturating, Get, LockIdentifier,
+		tokens::fungible::{Credit, Debt},
+		ConstU32, Defensive, DefensiveMax, DefensiveSaturating, Get, LockIdentifier,
 	},
 	weights::Weight,
 	BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
@@ -361,12 +362,9 @@ pub type RewardPoint = u32;
 /// The balance type of this pallet.
 pub type BalanceOf<T> = <T as Config>::CurrencyBalance;
 
-type PositiveImbalanceOf<T> = <<T as Config>::Currency as Currency<
-	<T as frame_system::Config>::AccountId,
->>::PositiveImbalance;
-pub type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
-	<T as frame_system::Config>::AccountId,
->>::NegativeImbalance;
+type PositiveImbalanceOf<T> = Debt<<T as frame_system::Config>::AccountId, <T as Config>::Currency>;
+pub type NegativeImbalanceOf<T> =
+	Credit<<T as frame_system::Config>::AccountId, <T as Config>::Currency>;
 
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 
diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs
index 769b84826b4..6346949576f 100644
--- a/substrate/frame/staking/src/mock.rs
+++ b/substrate/frame/staking/src/mock.rs
@@ -25,8 +25,7 @@ use frame_election_provider_support::{
 use frame_support::{
 	assert_ok, derive_impl, ord_parameter_types, parameter_types,
 	traits::{
-		ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Imbalance, LockableCurrency,
-		OnUnbalanced, OneSessionHandler, WithdrawReasons,
+		ConstU64, EitherOfDiverse, FindAuthor, Get, Imbalance, OnUnbalanced, OneSessionHandler,
 	},
 	weights::constants::RocksDbWeight,
 };
@@ -264,6 +263,7 @@ pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3;
 
 #[derive_impl(crate::config_preludes::TestDefaultConfig)]
 impl crate::pallet::pallet::Config for Test {
+	type OldCurrency = Balances;
 	type Currency = Balances;
 	type UnixTime = Timestamp;
 	type RewardRemainder = RewardRemainderMock;
@@ -432,6 +432,7 @@ impl ExtBuilder {
 	fn build(self) -> sp_io::TestExternalities {
 		sp_tracing::try_init_simple();
 		let mut storage = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
+		let ed = ExistentialDeposit::get();
 
 		let _ = pallet_balances::GenesisConfig::<Test> {
 			balances: vec![
@@ -446,19 +447,23 @@ impl ExtBuilder {
 				(40, self.balance_factor),
 				(50, self.balance_factor),
 				// stashes
-				(11, self.balance_factor * 1000),
-				(21, self.balance_factor * 2000),
-				(31, self.balance_factor * 2000),
-				(41, self.balance_factor * 2000),
-				(51, self.balance_factor * 2000),
-				(201, self.balance_factor * 2000),
-				(202, self.balance_factor * 2000),
+				// Note: Previously this pallet used locks and stakers could stake all their
+				// balance including ED. Now with holds, stakers are required to maintain
+				// (non-staked) ED in their accounts. Therefore, we drop an additional existential
+				// deposit to genesis stakers.
+				(11, self.balance_factor * 1000 + ed),
+				(21, self.balance_factor * 2000 + ed),
+				(31, self.balance_factor * 2000 + ed),
+				(41, self.balance_factor * 2000 + ed),
+				(51, self.balance_factor * 2000 + ed),
+				(201, self.balance_factor * 2000 + ed),
+				(202, self.balance_factor * 2000 + ed),
 				// optional nominator
-				(100, self.balance_factor * 2000),
-				(101, self.balance_factor * 2000),
+				(100, self.balance_factor * 2000 + ed),
+				(101, self.balance_factor * 2000 + ed),
 				// aux accounts
 				(60, self.balance_factor),
-				(61, self.balance_factor * 2000),
+				(61, self.balance_factor * 2000 + ed),
 				(70, self.balance_factor),
 				(71, self.balance_factor * 2000),
 				(80, self.balance_factor),
@@ -575,7 +580,7 @@ pub(crate) fn current_era() -> EraIndex {
 }
 
 pub(crate) fn bond(who: AccountId, val: Balance) {
-	let _ = Balances::make_free_balance_be(&who, val);
+	let _ = asset::set_stakeable_balance::<Test>(&who, val);
 	assert_ok!(Staking::bond(RuntimeOrigin::signed(who), val, RewardDestination::Stash));
 }
 
@@ -600,10 +605,6 @@ pub(crate) fn bond_virtual_nominator(
 	val: Balance,
 	target: Vec<AccountId>,
 ) {
-	// In a real scenario, `who` is a keyless account managed by another pallet which provides for
-	// it.
-	System::inc_providers(&who);
-
 	// Bond who virtually.
 	assert_ok!(<Staking as sp_staking::StakingUnchecked>::virtual_bond(&who, val, &payee));
 	assert_ok!(Staking::nominate(RuntimeOrigin::signed(who), target));
@@ -809,7 +810,7 @@ pub(crate) fn bond_extra_no_checks(stash: &AccountId, amount: Balance) {
 	let mut ledger = Ledger::<Test>::get(&controller).expect("ledger must exist to bond_extra");
 
 	let new_total = ledger.total + amount;
-	Balances::set_lock(crate::STAKING_ID, stash, new_total, WithdrawReasons::all());
+	let _ = asset::update_stake::<Test>(stash, new_total);
 	ledger.total = new_total;
 	ledger.active = new_total;
 	Ledger::<Test>::insert(controller, ledger);
@@ -818,10 +819,10 @@ pub(crate) fn bond_extra_no_checks(stash: &AccountId, amount: Balance) {
 pub(crate) fn setup_double_bonded_ledgers() {
 	let init_ledgers = Ledger::<Test>::iter().count();
 
-	let _ = Balances::make_free_balance_be(&333, 2000);
-	let _ = Balances::make_free_balance_be(&444, 2000);
-	let _ = Balances::make_free_balance_be(&555, 2000);
-	let _ = Balances::make_free_balance_be(&777, 2000);
+	let _ = asset::set_stakeable_balance::<Test>(&333, 2000);
+	let _ = asset::set_stakeable_balance::<Test>(&444, 2000);
+	let _ = asset::set_stakeable_balance::<Test>(&555, 2000);
+	let _ = asset::set_stakeable_balance::<Test>(&777, 2000);
 
 	assert_ok!(Staking::bond(RuntimeOrigin::signed(333), 10, RewardDestination::Staked));
 	assert_ok!(Staking::bond(RuntimeOrigin::signed(444), 20, RewardDestination::Staked));
@@ -923,5 +924,5 @@ pub(crate) fn staking_events_since_last_call() -> Vec<crate::Event<Test>> {
 }
 
 pub(crate) fn balances(who: &AccountId) -> (Balance, Balance) {
-	(Balances::free_balance(who), Balances::reserved_balance(who))
+	(asset::stakeable_balance::<Test>(who), Balances::reserved_balance(who))
 }
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index 2ae925d0364..8c3ff23315a 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -27,8 +27,8 @@ use frame_support::{
 	dispatch::WithPostDispatchInfo,
 	pallet_prelude::*,
 	traits::{
-		Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, Len, OnUnbalanced,
-		TryCollect, UnixTime,
+		Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance,
+		InspectLockableCurrency, Len, LockableCurrency, OnUnbalanced, TryCollect, UnixTime,
 	},
 	weights::Weight,
 };
@@ -36,10 +36,9 @@ use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin};
 use pallet_session::historical;
 use sp_runtime::{
 	traits::{
-		Bounded, CheckedAdd, CheckedSub, Convert, One, SaturatedConversion, Saturating,
-		StaticLookup, Zero,
+		Bounded, CheckedAdd, Convert, One, SaturatedConversion, Saturating, StaticLookup, Zero,
 	},
-	ArithmeticError, Perbill, Percent,
+	ArithmeticError, DispatchResult, Perbill, Percent,
 };
 use sp_staking::{
 	currency_to_vote::CurrencyToVote,
@@ -54,6 +53,7 @@ use crate::{
 	BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure,
 	LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota,
 	PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs,
+	STAKING_ID,
 };
 use alloc::{boxed::Box, vec, vec::Vec};
 
@@ -96,10 +96,12 @@ impl<T: Config> Pallet<T> {
 	pub(crate) fn inspect_bond_state(
 		stash: &T::AccountId,
 	) -> Result<LedgerIntegrityState, Error<T>> {
-		let lock = asset::staked::<T>(&stash);
+		// look at any old unmigrated lock as well.
+		let hold_or_lock = asset::staked::<T>(&stash)
+			.max(T::OldCurrency::balance_locked(STAKING_ID, &stash).into());
 
 		let controller = <Bonded<T>>::get(stash).ok_or_else(|| {
-			if lock == Zero::zero() {
+			if hold_or_lock == Zero::zero() {
 				Error::<T>::NotStash
 			} else {
 				Error::<T>::BadState
@@ -111,7 +113,7 @@ impl<T: Config> Pallet<T> {
 				if ledger.stash != *stash {
 					Ok(LedgerIntegrityState::Corrupted)
 				} else {
-					if lock != ledger.total {
+					if hold_or_lock != ledger.total {
 						Ok(LedgerIntegrityState::LockCorrupted)
 					} else {
 						Ok(LedgerIntegrityState::Ok)
@@ -163,11 +165,7 @@ impl<T: Config> Pallet<T> {
 			additional
 		} else {
 			// additional amount or actual balance of stash whichever is lower.
-			additional.min(
-				asset::stakeable_balance::<T>(stash)
-					.checked_sub(&ledger.total)
-					.ok_or(ArithmeticError::Overflow)?,
-			)
+			additional.min(asset::free_to_stake::<T>(stash))
 		};
 
 		ledger.total = ledger.total.checked_add(&extra).ok_or(ArithmeticError::Overflow)?;
@@ -416,12 +414,12 @@ impl<T: Config> Pallet<T> {
 		let dest = Self::payee(StakingAccount::Stash(stash.clone()))?;
 
 		let maybe_imbalance = match dest {
-			RewardDestination::Stash => asset::mint_existing::<T>(stash, amount),
+			RewardDestination::Stash => asset::mint_into_existing::<T>(stash, amount),
 			RewardDestination::Staked => Self::ledger(Stash(stash.clone()))
 				.and_then(|mut ledger| {
 					ledger.active += amount;
 					ledger.total += amount;
-					let r = asset::mint_existing::<T>(stash, amount);
+					let r = asset::mint_into_existing::<T>(stash, amount);
 
 					let _ = ledger
 						.update()
@@ -799,8 +797,6 @@ impl<T: Config> Pallet<T> {
 		Self::do_remove_validator(&stash);
 		Self::do_remove_nominator(&stash);
 
-		frame_system::Pallet::<T>::dec_consumers(&stash);
-
 		Ok(())
 	}
 
@@ -1163,6 +1159,81 @@ impl<T: Config> Pallet<T> {
 	) -> Exposure<T::AccountId, BalanceOf<T>> {
 		EraInfo::<T>::get_full_exposure(era, account)
 	}
+
+	pub(super) fn do_migrate_currency(stash: &T::AccountId) -> DispatchResult {
+		if Self::is_virtual_staker(stash) {
+			return Self::do_migrate_virtual_staker(stash);
+		}
+
+		let ledger = Self::ledger(Stash(stash.clone()))?;
+		let staked: BalanceOf<T> = T::OldCurrency::balance_locked(STAKING_ID, stash).into();
+		ensure!(!staked.is_zero(), Error::<T>::AlreadyMigrated);
+		ensure!(ledger.total == staked, Error::<T>::BadState);
+
+		// remove old staking lock
+		T::OldCurrency::remove_lock(STAKING_ID, &stash);
+
+		// check if we can hold all stake.
+		let max_hold = asset::free_to_stake::<T>(&stash);
+		let force_withdraw = if max_hold >= staked {
+			// this means we can hold all stake. yay!
+			asset::update_stake::<T>(&stash, staked)?;
+			Zero::zero()
+		} else {
+			// if we are here, it means we cannot hold all of the user's stake. We will do a force
+			// withdraw from the ledger, but that's okay since the user does not have funds for it anyway.
+			let force_withdraw = staked.saturating_sub(max_hold);
+
+			// we ignore if active is 0. It implies the locked amount is not actively staked. The
+			// account can still escape a potential slash, but we can't do much better here.
+			StakingLedger {
+				total: max_hold,
+				active: ledger.active.saturating_sub(force_withdraw),
+				// we are not changing the stash; the remaining ledger fields are kept as they are.
+				..ledger
+			}
+			.update()?;
+			force_withdraw
+		};
+
+		// Get rid of the extra consumer we used to have with OldCurrency.
+		frame_system::Pallet::<T>::dec_consumers(&stash);
+
+		Self::deposit_event(Event::<T>::CurrencyMigrated { stash: stash.clone(), force_withdraw });
+		Ok(())
+	}
+
+	fn do_migrate_virtual_staker(stash: &T::AccountId) -> DispatchResult {
+		// Funds of virtual stakers are not managed/held by this pallet. We only need to clear
+		// the extra consumer we used to have with OldCurrency.
+		frame_system::Pallet::<T>::dec_consumers(&stash);
+
+		// The delegation system that manages the virtual staker previously needed to increment
+		// the provider count because of the consumer added by this pallet. In reality, this stash
+		// is just a key for managing the ledger and the account does not need to hold any
+		// balance or exist. We decrement this provider.
+		let actual_providers = frame_system::Pallet::<T>::providers(stash);
+
+		let expected_providers =
+			// the provider count is expected to be 1, but someone can always transfer some free
+			// funds to these accounts, increasing the provider count.
+			if asset::free_to_stake::<T>(&stash) >= asset::existential_deposit::<T>() {
+				2
+			} else {
+				1
+			};
+
+		// We should never have more than expected providers.
+		ensure!(actual_providers <= expected_providers, Error::<T>::BadState);
+
+		// if the actual provider count is less than expected, the account is already migrated.
+		ensure!(actual_providers == expected_providers, Error::<T>::AlreadyMigrated);
+
+		// dec provider
+		let _ = frame_system::Pallet::<T>::dec_providers(&stash)?;
+
+		return Ok(())
+	}
 }
 
 impl<T: Config> Pallet<T> {
@@ -1925,9 +1996,10 @@ impl<T: Config> StakingInterface for Pallet<T> {
 }
 
 impl<T: Config> sp_staking::StakingUnchecked for Pallet<T> {
-	fn migrate_to_virtual_staker(who: &Self::AccountId) {
-		asset::kill_stake::<T>(who);
+	fn migrate_to_virtual_staker(who: &Self::AccountId) -> DispatchResult {
+		asset::kill_stake::<T>(who)?;
 		VirtualStakers::<T>::insert(who, ());
+		Ok(())
 	}
 
 	/// Virtually bonds `keyless_who` to `payee` with `value`.
@@ -1945,9 +2017,6 @@ impl<T: Config> sp_staking::StakingUnchecked for Pallet<T> {
 		// check if payee not same as who.
 		ensure!(keyless_who != payee, Error::<T>::RewardDestinationRestricted);
 
-		// mark this pallet as consumer of `who`.
-		frame_system::Pallet::<T>::inc_consumers(&keyless_who).map_err(|_| Error::<T>::BadState)?;
-
 		// mark who as a virtual staker.
 		VirtualStakers::<T>::insert(keyless_who, ());
 
@@ -1959,11 +2028,13 @@ impl<T: Config> sp_staking::StakingUnchecked for Pallet<T> {
 		Ok(())
 	}
 
+	/// Only meant to be used in tests.
 	#[cfg(feature = "runtime-benchmarks")]
 	fn migrate_to_direct_staker(who: &Self::AccountId) {
 		assert!(VirtualStakers::<T>::contains_key(who));
 		let ledger = StakingLedger::<T>::get(Stash(who.clone())).unwrap();
-		asset::update_stake::<T>(who, ledger.total);
+		let _ = asset::update_stake::<T>(who, ledger.total)
+			.expect("funds must be transferred to stash");
 		VirtualStakers::<T>::remove(who);
 	}
 }
@@ -2100,7 +2171,7 @@ impl<T: Config> Pallet<T> {
 				if VirtualStakers::<T>::contains_key(stash.clone()) {
 					ensure!(
 						asset::staked::<T>(&stash) == Zero::zero(),
-						"virtual stakers should not have any locked balance"
+						"virtual stakers should not have any staked balance"
 					);
 					ensure!(
 						<Bonded<T>>::get(stash.clone()).unwrap() == stash.clone(),
@@ -2128,7 +2199,7 @@ impl<T: Config> Pallet<T> {
 				} else {
 					ensure!(
 						Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok),
-						"bond, ledger and/or staking lock inconsistent for a bonded stash."
+						"bond, ledger and/or staking hold inconsistent for a bonded stash."
 					);
 				}
 
diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs
index b3f8c18f704..7d5da9ea0c4 100644
--- a/substrate/frame/staking/src/pallet/mod.rs
+++ b/substrate/frame/staking/src/pallet/mod.rs
@@ -25,8 +25,12 @@ use frame_election_provider_support::{
 use frame_support::{
 	pallet_prelude::*,
 	traits::{
+		fungible::{
+			hold::{Balanced as FunHoldBalanced, Mutate as FunHoldMutate},
+			Mutate as FunMutate,
+		},
 		Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get,
-		InspectLockableCurrency, LockableCurrency, OnUnbalanced, UnixTime,
+		InspectLockableCurrency, OnUnbalanced, UnixTime,
 	},
 	weights::Weight,
 	BoundedVec,
@@ -89,13 +93,27 @@ pub mod pallet {
 
 	#[pallet::config(with_default)]
 	pub trait Config: frame_system::Config {
+		/// The old trait for staking balance. Deprecated and only used for migrating old ledgers.
+		#[pallet::no_default]
+		type OldCurrency: InspectLockableCurrency<
+			Self::AccountId,
+			Moment = BlockNumberFor<Self>,
+			Balance = Self::CurrencyBalance,
+		>;
+
 		/// The staking balance.
 		#[pallet::no_default]
-		type Currency: LockableCurrency<
+		type Currency: FunHoldMutate<
 				Self::AccountId,
-				Moment = BlockNumberFor<Self>,
+				Reason = Self::RuntimeHoldReason,
 				Balance = Self::CurrencyBalance,
-			> + InspectLockableCurrency<Self::AccountId>;
+			> + FunMutate<Self::AccountId, Balance = Self::CurrencyBalance>
+			+ FunHoldBalanced<Self::AccountId, Balance = Self::CurrencyBalance>;
+
+		/// Overarching hold reason.
+		#[pallet::no_default_bounds]
+		type RuntimeHoldReason: From<HoldReason>;
+
 		/// Just the `Currency::Balance` type; we have this item to allow us to constrain it to
 		/// `From<u64>`.
 		type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned
@@ -106,6 +124,8 @@ pub mod pallet {
 			+ Default
 			+ From<u64>
 			+ TypeInfo
+			+ Send
+			+ Sync
 			+ MaxEncodedLen;
 		/// Time used for computing era duration.
 		///
@@ -309,6 +329,14 @@ pub mod pallet {
 		type WeightInfo: WeightInfo;
 	}
 
+	/// A reason for placing a hold on funds.
+	#[pallet::composite_enum]
+	pub enum HoldReason {
+		/// Funds on stake by a nominator or a validator.
+		#[codec(index = 0)]
+		Staking,
+	}
+
 	/// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`].
 	pub mod config_preludes {
 		use super::*;
@@ -327,6 +355,8 @@ pub mod pallet {
 		impl DefaultConfig for TestDefaultConfig {
 			#[inject_runtime_type]
 			type RuntimeEvent = ();
+			#[inject_runtime_type]
+			type RuntimeHoldReason = ();
 			type CurrencyBalance = u128;
 			type CurrencyToVote = ();
 			type NominationsQuota = crate::FixedNominationsQuota<16>;
@@ -765,7 +795,7 @@ pub mod pallet {
 					status
 				);
 				assert!(
-					asset::stakeable_balance::<T>(stash) >= balance,
+					asset::free_to_stake::<T>(stash) >= balance,
 					"Stash does not have enough balance to bond."
 				);
 				frame_support::assert_ok!(<Pallet<T>>::bond(
@@ -858,6 +888,9 @@ pub mod pallet {
 		ValidatorDisabled { stash: T::AccountId },
 		/// Validator has been re-enabled.
 		ValidatorReenabled { stash: T::AccountId },
+		/// Staking balance migrated from locks to holds; any balance that could not be held
+		/// was force withdrawn.
+		CurrencyMigrated { stash: T::AccountId, force_withdraw: BalanceOf<T> },
 	}
 
 	#[pallet::error]
@@ -929,6 +962,10 @@ pub mod pallet {
 		NotEnoughFunds,
 		/// Operation not allowed for virtual stakers.
 		VirtualStakerNotAllowed,
+		/// Stash could not be reaped as other pallet might depend on it.
+		CannotReapStash,
+		/// The stake of this account is already migrated to `Fungible` holds.
+		AlreadyMigrated,
 	}
 
 	#[pallet::hooks]
@@ -1172,10 +1209,7 @@ pub mod pallet {
 				return Err(Error::<T>::InsufficientBond.into())
 			}
 
-			// Would fail if account has no provider.
-			frame_system::Pallet::<T>::inc_consumers(&stash)?;
-
-			let stash_balance = asset::stakeable_balance::<T>(&stash);
+			let stash_balance = asset::free_to_stake::<T>(&stash);
 			let value = value.min(stash_balance);
 			Self::deposit_event(Event::<T>::Bonded { stash: stash.clone(), amount: value });
 			let ledger = StakingLedger::<T>::new(stash.clone(), value);
@@ -2231,8 +2265,8 @@ pub mod pallet {
 
 					let new_total = if let Some(total) = maybe_total {
 						let new_total = total.min(stash_balance);
-						// enforce lock == ledger.amount.
-						asset::update_stake::<T>(&stash, new_total);
+						// enforce hold == ledger.amount.
+						asset::update_stake::<T>(&stash, new_total)?;
 						new_total
 					} else {
 						current_lock
@@ -2259,13 +2293,13 @@ pub mod pallet {
 					// to enforce a new ledger.total and staking lock for this stash.
 					let new_total =
 						maybe_total.ok_or(Error::<T>::CannotRestoreLedger)?.min(stash_balance);
-					asset::update_stake::<T>(&stash, new_total);
+					asset::update_stake::<T>(&stash, new_total)?;
 
 					Ok((stash.clone(), new_total))
 				},
 				Err(Error::<T>::BadState) => {
 					// the stash and ledger do not exist but lock is lingering.
-					asset::kill_stake::<T>(&stash);
+					asset::kill_stake::<T>(&stash)?;
 					ensure!(
 						Self::inspect_bond_state(&stash) == Err(Error::<T>::NotStash),
 						Error::<T>::BadState
@@ -2291,6 +2325,26 @@ pub mod pallet {
 			);
 			Ok(())
 		}
+
+		/// Migrates permissionlessly a stash from locks to holds.
+		///
+		/// This removes the old lock on the stake and creates a hold on it atomically. If not
+		/// all of the stake can be held, a best effort is made to hold as much as possible. The
+		/// remaining stake is removed from the ledger.
+		///
+		/// The fee is waived if the migration is successful.
+		#[pallet::call_index(30)]
+		#[pallet::weight(T::WeightInfo::migrate_currency())]
+		pub fn migrate_currency(
+			origin: OriginFor<T>,
+			stash: T::AccountId,
+		) -> DispatchResultWithPostInfo {
+			let _ = ensure_signed(origin)?;
+			Self::do_migrate_currency(&stash)?;
+
+			// Refund the transaction fee if successful.
+			Ok(Pays::No.into())
+		}
 	}
 }
 
diff --git a/substrate/frame/staking/src/testing_utils.rs b/substrate/frame/staking/src/testing_utils.rs
index 81337710aa9..dfd5422106c 100644
--- a/substrate/frame/staking/src/testing_utils.rs
+++ b/substrate/frame/staking/src/testing_utils.rs
@@ -238,3 +238,21 @@ pub fn create_validators_with_nominators_for_era<T: Config>(
 pub fn current_era<T: Config>() -> EraIndex {
 	CurrentEra::<T>::get().unwrap_or(0)
 }
+
+pub fn migrate_to_old_currency<T: Config>(who: T::AccountId) {
+	use frame_support::traits::LockableCurrency;
+	let staked = asset::staked::<T>(&who);
+
+	// apply locks (this also adds a consumer).
+	T::OldCurrency::set_lock(
+		STAKING_ID,
+		&who,
+		staked,
+		frame_support::traits::WithdrawReasons::all(),
+	);
+	// remove holds.
+	asset::kill_stake::<T>(&who).expect("remove hold failed");
+
+	// replicate old behaviour of explicit increment of consumer.
+	frame_system::Pallet::<T>::inc_consumers(&who).expect("increment consumer failed");
+}
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index 6c2335e1aac..90841514399 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -26,8 +26,12 @@ use frame_election_provider_support::{
 use frame_support::{
 	assert_noop, assert_ok, assert_storage_noop,
 	dispatch::{extract_actual_weight, GetDispatchInfo, WithPostDispatchInfo},
+	hypothetically,
 	pallet_prelude::*,
-	traits::{Currency, Get, ReservableCurrency},
+	traits::{
+		fungible::Inspect, Currency, Get, InspectLockableCurrency, LockableCurrency,
+		ReservableCurrency, WithdrawReasons,
+	},
 };
 
 use mock::*;
@@ -108,7 +112,7 @@ fn force_unstake_works() {
 		// Cant transfer
 		assert_noop!(
 			Balances::transfer_allow_death(RuntimeOrigin::signed(11), 1, 10),
-			TokenError::Frozen,
+			TokenError::FundsUnavailable,
 		);
 		// Force unstake requires root.
 		assert_noop!(Staking::force_unstake(RuntimeOrigin::signed(11), 11, 2), BadOrigin);
@@ -229,8 +233,7 @@ fn basic_setup_works() {
 		assert_eq!(active_era(), 0);
 
 		// Account 10 has `balance_factor` free balance
-		assert_eq!(asset::stakeable_balance::<Test>(&10), 1);
-		assert_eq!(asset::stakeable_balance::<Test>(&10), 1);
+		assert_eq!(Balances::balance(&10), 1);
 
 		// New era is not being forced
 		assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
@@ -360,8 +363,16 @@ fn rewards_should_work() {
 				remainder: maximum_payout - total_payout_0
 			}
 		);
+
+		// make note of total issuance before rewards.
+		let total_issuance_0 = asset::total_issuance::<Test>();
+
 		mock::make_all_reward_payment(0);
 
+		// total issuance should have increased
+		let total_issuance_1 = asset::total_issuance::<Test>();
+		assert_eq!(total_issuance_1, total_issuance_0 + total_payout_0);
+
 		assert_eq_error_rate!(
 			asset::total_balance::<Test>(&11),
 			init_balance_11 + part_for_11 * total_payout_0 * 2 / 3,
@@ -401,6 +412,7 @@ fn rewards_should_work() {
 		);
 		mock::make_all_reward_payment(1);
 
+		assert_eq!(asset::total_issuance::<Test>(), total_issuance_1 + total_payout_1);
 		assert_eq_error_rate!(
 			asset::total_balance::<Test>(&11),
 			init_balance_11 + part_for_11 * (total_payout_0 * 2 / 3 + total_payout_1),
@@ -490,7 +502,7 @@ fn staking_should_work() {
 			}
 		);
 		// e.g. it cannot reserve more than 500 that it has free from the total 2000
-		assert_noop!(Balances::reserve(&3, 501), BalancesError::<Test, _>::LiquidityRestrictions);
+		assert_noop!(Balances::reserve(&3, 501), DispatchError::ConsumerRemaining);
 		assert_ok!(Balances::reserve(&3, 409));
 	});
 }
@@ -689,7 +701,7 @@ fn nominating_and_rewards_should_work() {
 			);
 			// Nominator 3: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 21]'s reward. ==>
 			// 2/9 + 3/11
-			assert_eq!(asset::total_balance::<Test>(&3), initial_balance);
+			assert_eq!(asset::stakeable_balance::<Test>(&3), initial_balance);
 			// 333 is the reward destination for 3.
 			assert_eq_error_rate!(
 				asset::total_balance::<Test>(&333),
@@ -992,9 +1004,9 @@ fn cannot_transfer_staked_balance() {
 	ExtBuilder::default().nominate(false).build_and_execute(|| {
 		// Confirm account 11 is stashed
 		assert_eq!(Staking::bonded(&11), Some(11));
-		// Confirm account 11 has some free balance
+		// Confirm account 11 has some stakeable balance
 		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
-		// Confirm account 11 (via controller) is totally staked
+		// Confirm account 11 is totally staked
 		assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000);
 		// Confirm account 11 cannot transfer as a result
 		assert_noop!(
@@ -1021,11 +1033,12 @@ fn cannot_transfer_staked_balance_2() {
 		assert_eq!(asset::stakeable_balance::<Test>(&21), 2000);
 		// Confirm account 21 (via controller) is totally staked
 		assert_eq!(Staking::eras_stakers(active_era(), &21).total, 1000);
-		// Confirm account 21 can transfer at most 1000
+		// Confirm account 21 cannot transfer more than 1000
 		assert_noop!(
 			Balances::transfer_allow_death(RuntimeOrigin::signed(21), 21, 1001),
 			TokenError::Frozen,
 		);
+		// Confirm account 21 needs to leave at least ED in free balance to be able to transfer
 		assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(21), 21, 1000));
 	});
 }
@@ -1036,17 +1049,61 @@ fn cannot_reserve_staked_balance() {
 	ExtBuilder::default().build_and_execute(|| {
 		// Confirm account 11 is stashed
 		assert_eq!(Staking::bonded(&11), Some(11));
-		// Confirm account 11 has some free balance
-		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
-		// Confirm account 11 (via controller 10) is totally staked
-		assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000);
+		// Confirm account 11 is totally staked
+		assert_eq!(asset::staked::<Test>(&11), 1000);
+
 		// Confirm account 11 cannot reserve as a result
-		assert_noop!(Balances::reserve(&11, 1), BalancesError::<Test, _>::LiquidityRestrictions);
+		assert_noop!(Balances::reserve(&11, 2), BalancesError::<Test, _>::InsufficientBalance);
+		assert_noop!(Balances::reserve(&11, 1), DispatchError::ConsumerRemaining);
 
 		// Give account 11 extra free balance
-		let _ = asset::set_stakeable_balance::<Test>(&11, 10000);
+		let _ = asset::set_stakeable_balance::<Test>(&11, 1000 + 1000);
+		assert_eq!(asset::free_to_stake::<Test>(&11), 1000);
+
 		// Confirm account 11 can now reserve balance
-		assert_ok!(Balances::reserve(&11, 1));
+		assert_ok!(Balances::reserve(&11, 500));
+
+		// free to stake balance has reduced
+		assert_eq!(asset::free_to_stake::<Test>(&11), 500);
+	});
+}
+
+#[test]
+fn locked_balance_can_be_staked() {
+	// Checks that balance under a lock is still available for staking (locks and holds overlap).
+	ExtBuilder::default().build_and_execute(|| {
+		// Confirm account 11 is stashed
+		assert_eq!(Staking::bonded(&11), Some(11));
+		assert_eq!(asset::staked::<Test>(&11), 1000);
+		assert_eq!(asset::free_to_stake::<Test>(&11), 0);
+
+		// add some staking balance to 11
+		let _ = asset::set_stakeable_balance::<Test>(&11, 1000 + 1000);
+		// free to stake is 1000
+		assert_eq!(asset::free_to_stake::<Test>(&11), 1000);
+
+		// lock some balance
+		Balances::set_lock(*b"somelock", &11, 500, WithdrawReasons::all());
+
+		// locked balance still available for staking
+		assert_eq!(asset::free_to_stake::<Test>(&11), 1000);
+
+		// can stake free balance
+		assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 500));
+		assert_eq!(asset::staked::<Test>(&11), 1500);
+
+		// Can stake the locked balance
+		assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 500));
+		assert_eq!(asset::staked::<Test>(&11), 2000);
+		// no balance left to stake
+		assert_eq!(asset::free_to_stake::<Test>(&11), 0);
+
+		// staking more than the free balance does not fail; it just stakes whatever is
+		// available. (not sure if that is the best way, but we keep it backward
+		// compatible)
+		assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 10));
+		// no extra balance staked.
+		assert_eq!(asset::staked::<Test>(&11), 2000);
 	});
 }
 
@@ -1057,9 +1114,9 @@ fn reward_destination_works() {
 		// Check that account 11 is a validator
 		assert!(Session::validators().contains(&11));
 		// Check the balance of the validator account
-		assert_eq!(asset::stakeable_balance::<Test>(&10), 1);
+		assert_eq!(asset::total_balance::<Test>(&10), 1);
 		// Check the balance of the stash account
-		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000);
+		assert_eq!(asset::total_balance::<Test>(&11), 1001);
 		// Check how much is at stake
 		assert_eq!(
 			Staking::ledger(11.into()).unwrap(),
@@ -1294,12 +1351,12 @@ fn bond_extra_and_withdraw_unbonded_works() {
 		// Give account 11 some large free balance greater than total
 		let _ = asset::set_stakeable_balance::<Test>(&11, 1000000);
 
+		// ensure it has the correct balance.
+		assert_eq!(asset::stakeable_balance::<Test>(&11), 1000000);
+
 		// Initial config should be correct
 		assert_eq!(active_era(), 0);
 
-		// check the balance of a validator accounts.
-		assert_eq!(asset::total_balance::<Test>(&11), 1000000);
-
 		// confirm that 10 is a normal validator and gets paid at the end of the era.
 		mock::start_active_era(1);
 
@@ -2077,7 +2134,7 @@ fn bond_with_no_staked_value() {
 			);
 			// bonded with absolute minimum value possible.
 			assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 5, RewardDestination::Account(1)));
-			assert_eq!(pallet_balances::Locks::<Test>::get(&1)[0].amount, 5);
+			assert_eq!(pallet_balances::Holds::<Test>::get(&1)[0].amount, 5);
 
 			// unbonding even 1 will cause all to be unbonded.
 			assert_ok!(Staking::unbond(RuntimeOrigin::signed(1), 1));
@@ -2098,14 +2155,14 @@ fn bond_with_no_staked_value() {
 			// not yet removed.
 			assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(1), 0));
 			assert!(Staking::ledger(1.into()).is_ok());
-			assert_eq!(pallet_balances::Locks::<Test>::get(&1)[0].amount, 5);
+			assert_eq!(pallet_balances::Holds::<Test>::get(&1)[0].amount, 5);
 
 			mock::start_active_era(3);
 
 			// poof. Account 1 is removed from the staking system.
 			assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(1), 0));
 			assert!(Staking::ledger(1.into()).is_err());
-			assert_eq!(pallet_balances::Locks::<Test>::get(&1).len(), 0);
+			assert_eq!(pallet_balances::Holds::<Test>::get(&1).len(), 0);
 		});
 }
 
@@ -2338,9 +2395,20 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 		EraInfo::<Test>::set_exposure(0, &11, exposure);
 		ErasValidatorReward::<Test>::insert(0, stake);
 		assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0));
-		assert_eq!(asset::total_balance::<Test>(&11), stake * 2);
+		assert_eq!(asset::stakeable_balance::<Test>(&11), stake * 2);
 
-		// Set staker
+		// ensure ledger has `stake` and no more.
+		Ledger::<Test>::insert(
+			11,
+			StakingLedgerInspect {
+				stash: 11,
+				total: stake,
+				active: stake,
+				unlocking: Default::default(),
+				legacy_claimed_rewards: bounded_vec![1],
+			},
+		);
+		// Set staker (unsafe, can reduce balance below actual stake)
 		let _ = asset::set_stakeable_balance::<Test>(&11, stake);
 		let _ = asset::set_stakeable_balance::<Test>(&2, stake);
 
@@ -2366,8 +2434,8 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 			&[Perbill::from_percent(100)],
 		);
 
-		assert_eq!(asset::total_balance::<Test>(&11), stake - 1);
-		assert_eq!(asset::total_balance::<Test>(&2), 1);
+		assert_eq!(asset::stakeable_balance::<Test>(&11), stake - 1);
+		assert_eq!(asset::stakeable_balance::<Test>(&2), 1);
 	})
 }
 
@@ -2627,8 +2695,8 @@ fn reporters_receive_their_slice() {
 		// 50% * (10% * initial_balance / 2)
 		let reward = (initial_balance / 20) / 2;
 		let reward_each = reward / 2; // split into two pieces.
-		assert_eq!(asset::stakeable_balance::<Test>(&1), 10 + reward_each);
-		assert_eq!(asset::stakeable_balance::<Test>(&2), 20 + reward_each);
+		assert_eq!(asset::total_balance::<Test>(&1), 10 + reward_each);
+		assert_eq!(asset::total_balance::<Test>(&2), 20 + reward_each);
 	});
 }
 
@@ -2653,7 +2721,7 @@ fn subsequent_reports_in_same_span_pay_out_less() {
 		// F1 * (reward_proportion * slash - 0)
 		// 50% * (10% * initial_balance * 20%)
 		let reward = (initial_balance / 5) / 20;
-		assert_eq!(asset::stakeable_balance::<Test>(&1), 10 + reward);
+		assert_eq!(asset::total_balance::<Test>(&1), 10 + reward);
 
 		on_offence_now(
 			&[OffenceDetails {
@@ -2668,7 +2736,7 @@ fn subsequent_reports_in_same_span_pay_out_less() {
 		// F1 * (reward_proportion * slash - prior_payout)
 		// 50% * (10% * (initial_balance / 2) - prior_payout)
 		let reward = ((initial_balance / 20) - prior_payout) / 2;
-		assert_eq!(asset::stakeable_balance::<Test>(&1), 10 + prior_payout + reward);
+		assert_eq!(asset::total_balance::<Test>(&1), 10 + prior_payout + reward);
 	});
 }
 
@@ -2812,8 +2880,9 @@ fn garbage_collection_after_slashing() {
 			// validator and nominator slash in era are garbage-collected by era change,
 			// so we don't test those here.
 
-			assert_eq!(asset::stakeable_balance::<Test>(&11), 2);
-			assert_eq!(asset::total_balance::<Test>(&11), 2);
+			assert_eq!(asset::stakeable_balance::<Test>(&11), 0);
+			// Non staked balance is not touched.
+			assert_eq!(asset::total_balance::<Test>(&11), ExistentialDeposit::get());
 
 			let slashing_spans = SlashingSpans::<Test>::get(&11).unwrap();
 			assert_eq!(slashing_spans.iter().count(), 2);
@@ -6092,7 +6161,7 @@ fn nomination_quota_max_changes_decoding() {
 		.add_staker(70, 71, 10, StakerStatus::Nominator(vec![1, 2, 3]))
 		.add_staker(30, 330, 10, StakerStatus::Nominator(vec![1, 2, 3, 4]))
 		.add_staker(50, 550, 10, StakerStatus::Nominator(vec![1, 2, 3, 4]))
-		.balance_factor(10)
+		.balance_factor(11)
 		.build_and_execute(|| {
 			// pre-condition.
 			assert_eq!(MaxNominationsOf::<Test>::get(), 16);
@@ -6208,240 +6277,248 @@ fn force_apply_min_commission_works() {
 
 #[test]
 fn proportional_slash_stop_slashing_if_remaining_zero() {
-	let c = |era, value| UnlockChunk::<Balance> { era, value };
+	ExtBuilder::default().nominate(true).build_and_execute(|| {
+		let c = |era, value| UnlockChunk::<Balance> { era, value };
 
-	// we have some chunks, but they are not affected.
-	let unlocking = bounded_vec![c(1, 10), c(2, 10)];
+		// we have some chunks, but they are not affected.
+		let unlocking = bounded_vec![c(1, 10), c(2, 10)];
 
-	// Given
-	let mut ledger = StakingLedger::<Test>::new(123, 20);
-	ledger.total = 40;
-	ledger.unlocking = unlocking;
+		// Given
+		let mut ledger = StakingLedger::<Test>::new(123, 20);
+		ledger.total = 40;
+		ledger.unlocking = unlocking;
 
-	assert_eq!(BondingDuration::get(), 3);
+		assert_eq!(BondingDuration::get(), 3);
 
-	// should not slash more than the amount requested, by accidentally slashing the first chunk.
-	assert_eq!(ledger.slash(18, 1, 0), 18);
+		// should not slash more than the amount requested, by accidentally slashing the first
+		// chunk.
+		assert_eq!(ledger.slash(18, 1, 0), 18);
+	});
 }
 
 #[test]
 fn proportional_ledger_slash_works() {
-	let c = |era, value| UnlockChunk::<Balance> { era, value };
-	// Given
-	let mut ledger = StakingLedger::<Test>::new(123, 10);
-	assert_eq!(BondingDuration::get(), 3);
-
-	// When we slash a ledger with no unlocking chunks
-	assert_eq!(ledger.slash(5, 1, 0), 5);
-	// Then
-	assert_eq!(ledger.total, 5);
-	assert_eq!(ledger.active, 5);
-	assert_eq!(LedgerSlashPerEra::get().0, 5);
-	assert_eq!(LedgerSlashPerEra::get().1, Default::default());
-
-	// When we slash a ledger with no unlocking chunks and the slash amount is greater then the
-	// total
-	assert_eq!(ledger.slash(11, 1, 0), 5);
-	// Then
-	assert_eq!(ledger.total, 0);
-	assert_eq!(ledger.active, 0);
-	assert_eq!(LedgerSlashPerEra::get().0, 0);
-	assert_eq!(LedgerSlashPerEra::get().1, Default::default());
-
-	// Given
-	ledger.unlocking = bounded_vec![c(4, 10), c(5, 10)];
-	ledger.total = 2 * 10;
-	ledger.active = 0;
-	// When all the chunks overlap with the slash eras
-	assert_eq!(ledger.slash(20, 0, 0), 20);
-	// Then
-	assert_eq!(ledger.unlocking, vec![]);
-	assert_eq!(ledger.total, 0);
-	assert_eq!(LedgerSlashPerEra::get().0, 0);
-	assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(4, 0), (5, 0)]));
-
-	// Given
-	ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)];
-	ledger.total = 4 * 100;
-	ledger.active = 0;
-	// When the first 2 chunks don't overlap with the affected range of unlock eras.
-	assert_eq!(ledger.slash(140, 0, 3), 140);
-	// Then
-	assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 30), c(7, 30)]);
-	assert_eq!(ledger.total, 4 * 100 - 140);
-	assert_eq!(LedgerSlashPerEra::get().0, 0);
-	assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 30), (7, 30)]));
-
-	// Given
-	ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)];
-	ledger.total = 4 * 100;
-	ledger.active = 0;
-	// When the first 2 chunks don't overlap with the affected range of unlock eras.
-	assert_eq!(ledger.slash(15, 0, 3), 15);
-	// Then
-	assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 100 - 8), c(7, 100 - 7)]);
-	assert_eq!(ledger.total, 4 * 100 - 15);
-	assert_eq!(LedgerSlashPerEra::get().0, 0);
-	assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 92), (7, 93)]));
-
-	// Given
-	ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)];
-	ledger.active = 500;
-	// 900
-	ledger.total = 40 + 10 + 100 + 250 + 500;
-	// When we have a partial slash that touches all chunks
-	assert_eq!(ledger.slash(900 / 2, 0, 0), 450);
-	// Then
-	assert_eq!(ledger.active, 500 / 2);
-	assert_eq!(ledger.unlocking, vec![c(4, 40 / 2), c(5, 100 / 2), c(6, 10 / 2), c(7, 250 / 2)]);
-	assert_eq!(ledger.total, 900 / 2);
-	assert_eq!(LedgerSlashPerEra::get().0, 500 / 2);
-	assert_eq!(
-		LedgerSlashPerEra::get().1,
-		BTreeMap::from([(4, 40 / 2), (5, 100 / 2), (6, 10 / 2), (7, 250 / 2)])
-	);
+	ExtBuilder::default().nominate(true).build_and_execute(|| {
+		let c = |era, value| UnlockChunk::<Balance> { era, value };
+		// Given
+		let mut ledger = StakingLedger::<Test>::new(123, 10);
+		assert_eq!(BondingDuration::get(), 3);
 
-	// slash 1/4th with not chunk.
-	ledger.unlocking = bounded_vec![];
-	ledger.active = 500;
-	ledger.total = 500;
-	// When we have a partial slash that touches all chunks
-	assert_eq!(ledger.slash(500 / 4, 0, 0), 500 / 4);
-	// Then
-	assert_eq!(ledger.active, 3 * 500 / 4);
-	assert_eq!(ledger.unlocking, vec![]);
-	assert_eq!(ledger.total, ledger.active);
-	assert_eq!(LedgerSlashPerEra::get().0, 3 * 500 / 4);
-	assert_eq!(LedgerSlashPerEra::get().1, Default::default());
-
-	// Given we have the same as above,
-	ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)];
-	ledger.active = 500;
-	ledger.total = 40 + 10 + 100 + 250 + 500; // 900
-	assert_eq!(ledger.total, 900);
-	// When we have a higher min balance
-	assert_eq!(
-		ledger.slash(
-			900 / 2,
-			25, /* min balance - chunks with era 0 & 2 will be slashed to <=25, causing it to
-			     * get swept */
-			0
-		),
-		450
-	);
-	assert_eq!(ledger.active, 500 / 2);
-	// the last chunk was not slashed 50% like all the rest, because some other earlier chunks got
-	// dusted.
-	assert_eq!(ledger.unlocking, vec![c(5, 100 / 2), c(7, 150)]);
-	assert_eq!(ledger.total, 900 / 2);
-	assert_eq!(LedgerSlashPerEra::get().0, 500 / 2);
-	assert_eq!(
-		LedgerSlashPerEra::get().1,
-		BTreeMap::from([(4, 0), (5, 100 / 2), (6, 0), (7, 150)])
-	);
+		// When we slash a ledger with no unlocking chunks
+		assert_eq!(ledger.slash(5, 1, 0), 5);
+		// Then
+		assert_eq!(ledger.total, 5);
+		assert_eq!(ledger.active, 5);
+		assert_eq!(LedgerSlashPerEra::get().0, 5);
+		assert_eq!(LedgerSlashPerEra::get().1, Default::default());
+
+		// When we slash a ledger with no unlocking chunks and the slash amount is greater then the
+		// total
+		assert_eq!(ledger.slash(11, 1, 0), 5);
+		// Then
+		assert_eq!(ledger.total, 0);
+		assert_eq!(ledger.active, 0);
+		assert_eq!(LedgerSlashPerEra::get().0, 0);
+		assert_eq!(LedgerSlashPerEra::get().1, Default::default());
 
-	// Given
-	// slash order --------------------NA--------2----------0----------1----
-	ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)];
-	ledger.active = 500;
-	ledger.total = 40 + 10 + 100 + 250 + 500; // 900
-	assert_eq!(
-		ledger.slash(
-			500 + 10 + 250 + 100 / 2, // active + era 6 + era 7 + era 5 / 2
-			0,
-			3 /* slash era 6 first, so the affected parts are era 6, era 7 and
-			   * ledge.active. This will cause the affected to go to zero, and then we will
-			   * start slashing older chunks */
-		),
-		500 + 250 + 10 + 100 / 2
-	);
-	// Then
-	assert_eq!(ledger.active, 0);
-	assert_eq!(ledger.unlocking, vec![c(4, 40), c(5, 100 / 2)]);
-	assert_eq!(ledger.total, 90);
-	assert_eq!(LedgerSlashPerEra::get().0, 0);
-	assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(5, 100 / 2), (6, 0), (7, 0)]));
-
-	// Given
-	// iteration order------------------NA---------2----------0----------1----
-	ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)];
-	ledger.active = 100;
-	ledger.total = 5 * 100;
-	// When
-	assert_eq!(
-		ledger.slash(
-			351, // active + era 6 + era 7 + era 5 / 2 + 1
-			50,  // min balance - everything slashed below 50 will get dusted
-			3    /* slash era 3+3 first, so the affected parts are era 6, era 7 and
-			      * ledge.active. This will cause the affected to go to zero, and then we will
-			      * start slashing older chunks */
-		),
-		400
-	);
-	// Then
-	assert_eq!(ledger.active, 0);
-	assert_eq!(ledger.unlocking, vec![c(4, 100)]);
-	assert_eq!(ledger.total, 100);
-	assert_eq!(LedgerSlashPerEra::get().0, 0);
-	assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(5, 0), (6, 0), (7, 0)]));
-
-	// Tests for saturating arithmetic
-
-	// Given
-	let slash = u64::MAX as Balance * 2;
-	// The value of the other parts of ledger that will get slashed
-	let value = slash - (10 * 4);
-
-	ledger.active = 10;
-	ledger.unlocking = bounded_vec![c(4, 10), c(5, 10), c(6, 10), c(7, value)];
-	ledger.total = value + 40;
-	// When
-	let slash_amount = ledger.slash(slash, 0, 0);
-	assert_eq_error_rate!(slash_amount, slash, 5);
-	// Then
-	assert_eq!(ledger.active, 0); // slash of 9
-	assert_eq!(ledger.unlocking, vec![]);
-	assert_eq!(ledger.total, 0);
-	assert_eq!(LedgerSlashPerEra::get().0, 0);
-	assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(4, 0), (5, 0), (6, 0), (7, 0)]));
-
-	// Given
-	use sp_runtime::PerThing as _;
-	let slash = u64::MAX as Balance * 2;
-	let value = u64::MAX as Balance * 2;
-	let unit = 100;
-	// slash * value that will saturate
-	assert!(slash.checked_mul(value).is_none());
-	// but slash * unit won't.
-	assert!(slash.checked_mul(unit).is_some());
-	ledger.unlocking = bounded_vec![c(4, unit), c(5, value), c(6, unit), c(7, unit)];
-	//--------------------------------------note value^^^
-	ledger.active = unit;
-	ledger.total = unit * 4 + value;
-	// When
-	assert_eq!(ledger.slash(slash, 0, 0), slash);
-	// Then
-	// The amount slashed out of `unit`
-	let affected_balance = value + unit * 4;
-	let ratio =
-		Perquintill::from_rational_with_rounding(slash, affected_balance, Rounding::Up).unwrap();
-	// `unit` after the slash is applied
-	let unit_slashed = {
-		let unit_slash = ratio.mul_ceil(unit);
-		unit - unit_slash
-	};
-	let value_slashed = {
-		let value_slash = ratio.mul_ceil(value);
-		value - value_slash
-	};
-	assert_eq!(ledger.active, unit_slashed);
-	assert_eq!(ledger.unlocking, vec![c(5, value_slashed), c(7, 32)]);
-	assert_eq!(ledger.total, value_slashed + 32);
-	assert_eq!(LedgerSlashPerEra::get().0, 0);
-	assert_eq!(
-		LedgerSlashPerEra::get().1,
-		BTreeMap::from([(4, 0), (5, value_slashed), (6, 0), (7, 32)])
-	);
+		// Given
+		ledger.unlocking = bounded_vec![c(4, 10), c(5, 10)];
+		ledger.total = 2 * 10;
+		ledger.active = 0;
+		// When all the chunks overlap with the slash eras
+		assert_eq!(ledger.slash(20, 0, 0), 20);
+		// Then
+		assert_eq!(ledger.unlocking, vec![]);
+		assert_eq!(ledger.total, 0);
+		assert_eq!(LedgerSlashPerEra::get().0, 0);
+		assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(4, 0), (5, 0)]));
+
+		// Given
+		ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)];
+		ledger.total = 4 * 100;
+		ledger.active = 0;
+		// When the first 2 chunks don't overlap with the affected range of unlock eras.
+		assert_eq!(ledger.slash(140, 0, 3), 140);
+		// Then
+		assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 30), c(7, 30)]);
+		assert_eq!(ledger.total, 4 * 100 - 140);
+		assert_eq!(LedgerSlashPerEra::get().0, 0);
+		assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 30), (7, 30)]));
+
+		// Given
+		ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)];
+		ledger.total = 4 * 100;
+		ledger.active = 0;
+		// When the first 2 chunks don't overlap with the affected range of unlock eras.
+		assert_eq!(ledger.slash(15, 0, 3), 15);
+		// Then
+		assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 100 - 8), c(7, 100 - 7)]);
+		assert_eq!(ledger.total, 4 * 100 - 15);
+		assert_eq!(LedgerSlashPerEra::get().0, 0);
+		assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 92), (7, 93)]));
+
+		// Given
+		ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)];
+		ledger.active = 500;
+		// 900
+		ledger.total = 40 + 10 + 100 + 250 + 500;
+		// When we have a partial slash that touches all chunks
+		assert_eq!(ledger.slash(900 / 2, 0, 0), 450);
+		// Then
+		assert_eq!(ledger.active, 500 / 2);
+		assert_eq!(
+			ledger.unlocking,
+			vec![c(4, 40 / 2), c(5, 100 / 2), c(6, 10 / 2), c(7, 250 / 2)]
+		);
+		assert_eq!(ledger.total, 900 / 2);
+		assert_eq!(LedgerSlashPerEra::get().0, 500 / 2);
+		assert_eq!(
+			LedgerSlashPerEra::get().1,
+			BTreeMap::from([(4, 40 / 2), (5, 100 / 2), (6, 10 / 2), (7, 250 / 2)])
+		);
+
+		// slash 1/4th with no chunks.
+		ledger.unlocking = bounded_vec![];
+		ledger.active = 500;
+		ledger.total = 500;
+		// When we have a partial slash that touches all chunks
+		assert_eq!(ledger.slash(500 / 4, 0, 0), 500 / 4);
+		// Then
+		assert_eq!(ledger.active, 3 * 500 / 4);
+		assert_eq!(ledger.unlocking, vec![]);
+		assert_eq!(ledger.total, ledger.active);
+		assert_eq!(LedgerSlashPerEra::get().0, 3 * 500 / 4);
+		assert_eq!(LedgerSlashPerEra::get().1, Default::default());
+
+		// Given we have the same as above,
+		ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)];
+		ledger.active = 500;
+		ledger.total = 40 + 10 + 100 + 250 + 500; // 900
+		assert_eq!(ledger.total, 900);
+		// When we have a higher min balance
+		assert_eq!(
+			ledger.slash(
+				900 / 2,
+				25, /* min balance - chunks with era 0 & 2 will be slashed to <=25, causing it
+				     * to get swept */
+				0
+			),
+			450
+		);
+		assert_eq!(ledger.active, 500 / 2);
+		// the last chunk was not slashed 50% like all the rest, because some other earlier chunks
+		// got dusted.
+		assert_eq!(ledger.unlocking, vec![c(5, 100 / 2), c(7, 150)]);
+		assert_eq!(ledger.total, 900 / 2);
+		assert_eq!(LedgerSlashPerEra::get().0, 500 / 2);
+		assert_eq!(
+			LedgerSlashPerEra::get().1,
+			BTreeMap::from([(4, 0), (5, 100 / 2), (6, 0), (7, 150)])
+		);
+
+		// Given
+		// slash order --------------------NA--------2----------0----------1----
+		ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)];
+		ledger.active = 500;
+		ledger.total = 40 + 10 + 100 + 250 + 500; // 900
+		assert_eq!(
+			ledger.slash(
+				500 + 10 + 250 + 100 / 2, // active + era 6 + era 7 + era 5 / 2
+				0,
+				3 /* slash era 6 first, so the affected parts are era 6, era 7 and
+				   * ledger.active. This will cause the affected to go to zero, and then we will
+				   * start slashing older chunks */
+			),
+			500 + 250 + 10 + 100 / 2
+		);
+		// Then
+		assert_eq!(ledger.active, 0);
+		assert_eq!(ledger.unlocking, vec![c(4, 40), c(5, 100 / 2)]);
+		assert_eq!(ledger.total, 90);
+		assert_eq!(LedgerSlashPerEra::get().0, 0);
+		assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(5, 100 / 2), (6, 0), (7, 0)]));
+
+		// Given
+		// iteration order------------------NA---------2----------0----------1----
+		ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)];
+		ledger.active = 100;
+		ledger.total = 5 * 100;
+		// When
+		assert_eq!(
+			ledger.slash(
+				351, // active + era 6 + era 7 + era 5 / 2 + 1
+				50,  // min balance - everything slashed below 50 will get dusted
+				3    /* slash era 3+3 first, so the affected parts are era 6, era 7 and
+				      * ledger.active. This will cause the affected to go to zero, and then we
+				      * will start slashing older chunks */
+			),
+			400
+		);
+		// Then
+		assert_eq!(ledger.active, 0);
+		assert_eq!(ledger.unlocking, vec![c(4, 100)]);
+		assert_eq!(ledger.total, 100);
+		assert_eq!(LedgerSlashPerEra::get().0, 0);
+		assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(5, 0), (6, 0), (7, 0)]));
+
+		// Tests for saturating arithmetic
+
+		// Given
+		let slash = u64::MAX as Balance * 2;
+		// The value of the other parts of ledger that will get slashed
+		let value = slash - (10 * 4);
+
+		ledger.active = 10;
+		ledger.unlocking = bounded_vec![c(4, 10), c(5, 10), c(6, 10), c(7, value)];
+		ledger.total = value + 40;
+		// When
+		let slash_amount = ledger.slash(slash, 0, 0);
+		assert_eq_error_rate!(slash_amount, slash, 5);
+		// Then
+		assert_eq!(ledger.active, 0); // slash of 9
+		assert_eq!(ledger.unlocking, vec![]);
+		assert_eq!(ledger.total, 0);
+		assert_eq!(LedgerSlashPerEra::get().0, 0);
+		assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(4, 0), (5, 0), (6, 0), (7, 0)]));
+
+		// Given
+		use sp_runtime::PerThing as _;
+		let slash = u64::MAX as Balance * 2;
+		let value = u64::MAX as Balance * 2;
+		let unit = 100;
+		// slash * value that will saturate
+		assert!(slash.checked_mul(value).is_none());
+		// but slash * unit won't.
+		assert!(slash.checked_mul(unit).is_some());
+		ledger.unlocking = bounded_vec![c(4, unit), c(5, value), c(6, unit), c(7, unit)];
+		//--------------------------------------note value^^^
+		ledger.active = unit;
+		ledger.total = unit * 4 + value;
+		// When
+		assert_eq!(ledger.slash(slash, 0, 0), slash);
+		// Then
+		// The amount slashed out of `unit`
+		let affected_balance = value + unit * 4;
+		let ratio = Perquintill::from_rational_with_rounding(slash, affected_balance, Rounding::Up)
+			.unwrap();
+		// `unit` after the slash is applied
+		let unit_slashed = {
+			let unit_slash = ratio.mul_ceil(unit);
+			unit - unit_slash
+		};
+		let value_slashed = {
+			let value_slash = ratio.mul_ceil(value);
+			value - value_slash
+		};
+		assert_eq!(ledger.active, unit_slashed);
+		assert_eq!(ledger.unlocking, vec![c(5, value_slashed), c(7, 32)]);
+		assert_eq!(ledger.total, value_slashed + 32);
+		assert_eq!(LedgerSlashPerEra::get().0, 0);
+		assert_eq!(
+			LedgerSlashPerEra::get().1,
+			BTreeMap::from([(4, 0), (5, value_slashed), (6, 0), (7, 32)])
+		);
+	});
 }
 
 #[test]
@@ -7126,7 +7203,7 @@ mod staking_unchecked {
 	fn virtual_bond_does_not_lock() {
 		ExtBuilder::default().build_and_execute(|| {
 			mock::start_active_era(1);
-			assert_eq!(asset::stakeable_balance::<Test>(&10), 1);
+			assert_eq!(asset::total_balance::<Test>(&10), 1);
 			// 10 can bond more than its balance amount since we do not require lock for virtual
 			// bonding.
 			assert_ok!(<Staking as StakingUnchecked>::virtual_bond(&10, 100, &15));
@@ -7265,7 +7342,7 @@ mod staking_unchecked {
 			assert_eq!(asset::staked::<Test>(&200), 1000);
 
 			// migrate them to virtual staker
-			<Staking as StakingUnchecked>::migrate_to_virtual_staker(&200);
+			assert_ok!(<Staking as StakingUnchecked>::migrate_to_virtual_staker(&200));
 			// payee needs to be updated to a non-stash account.
 			assert_ok!(<Staking as StakingInterface>::set_payee(&200, &201));
 
@@ -7292,7 +7369,7 @@ mod staking_unchecked {
 				// 101 is a nominator for 11
 				assert_eq!(initial_exposure.others.first().unwrap().who, 101);
 				// make 101 a virtual nominator
-				<Staking as StakingUnchecked>::migrate_to_virtual_staker(&101);
+				assert_ok!(<Staking as StakingUnchecked>::migrate_to_virtual_staker(&101));
 				// set payee different to self.
 				assert_ok!(<Staking as StakingInterface>::set_payee(&101, &102));
 
@@ -7367,7 +7444,7 @@ mod staking_unchecked {
 				// 101 is a nominator for 11
 				assert_eq!(initial_exposure.others.first().unwrap().who, 101);
 				// make 101 a virtual nominator
-				<Staking as StakingUnchecked>::migrate_to_virtual_staker(&101);
+				assert_ok!(<Staking as StakingUnchecked>::migrate_to_virtual_staker(&101));
 				// set payee different to self.
 				assert_ok!(<Staking as StakingInterface>::set_payee(&101, &102));
 
@@ -7423,7 +7500,7 @@ mod staking_unchecked {
 			// 333 is corrupted
 			assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted);
 			// migrate to virtual staker.
-			<Staking as StakingUnchecked>::migrate_to_virtual_staker(&333);
+			assert_ok!(<Staking as StakingUnchecked>::migrate_to_virtual_staker(&333));
 
 			// recover the ledger won't work for virtual staker
 			assert_noop!(
@@ -8034,8 +8111,7 @@ mod ledger_recovery {
 			// side effects on 333 - ledger, bonded, payee, lock should be intact.
 			assert_eq!(asset::staked::<Test>(&333), lock_333_before); // OK
 			assert_eq!(Bonded::<Test>::get(&333), Some(444)); // OK
-			assert!(Payee::<Test>::get(&333).is_some()); // OK
-
+			assert!(Payee::<Test>::get(&333).is_some());
 			// however, ledger associated with its controller was killed.
 			assert!(Ledger::<Test>::get(&444).is_none()); // NOK
 
@@ -9081,3 +9157,249 @@ mod getters {
 		});
 	}
 }
+
+mod hold_migration {
+	use super::*;
+	use sp_staking::{Stake, StakingInterface};
+
+	#[test]
+	fn ledger_update_creates_hold() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			// GIVEN alice who is a nominator with old currency
+			let alice = 300;
+			bond_nominator(alice, 1000, vec![11]);
+			assert_eq!(asset::staked::<Test>(&alice), 1000);
+			assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 0);
+			// migrate alice currency to legacy locks
+			testing_utils::migrate_to_old_currency::<Test>(alice);
+			// no more holds
+			assert_eq!(asset::staked::<Test>(&alice), 0);
+			assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 1000);
+			assert_eq!(
+				<Staking as StakingInterface>::stake(&alice),
+				Ok(Stake { total: 1000, active: 1000 })
+			);
+
+			// any ledger mutation should create a hold
+			hypothetically!({
+				// give some extra balance to alice.
+				let _ = asset::mint_into_existing::<Test>(&alice, 100);
+
+				// WHEN new fund is bonded to ledger.
+				assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(alice), 100));
+
+				// THEN new hold is created
+				assert_eq!(asset::staked::<Test>(&alice), 1000 + 100);
+				assert_eq!(
+					<Staking as StakingInterface>::stake(&alice),
+					Ok(Stake { total: 1100, active: 1100 })
+				);
+
+				// old locked balance is untouched
+				assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 1000);
+			});
+
+			hypothetically!({
+				// WHEN new fund is unbonded from ledger.
+				assert_ok!(Staking::unbond(RuntimeOrigin::signed(alice), 100));
+
+				// THEN hold is updated.
+				assert_eq!(asset::staked::<Test>(&alice), 1000);
+				assert_eq!(
+					<Staking as StakingInterface>::stake(&alice),
+					Ok(Stake { total: 1000, active: 900 })
+				);
+
+				// old locked balance is untouched
+				assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 1000);
+			});
+
+			// WHEN alice currency is migrated.
+			assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), alice));
+
+			// THEN hold is updated.
+			assert_eq!(asset::staked::<Test>(&alice), 1000);
+			assert_eq!(
+				<Staking as StakingInterface>::stake(&alice),
+				Ok(Stake { total: 1000, active: 1000 })
+			);
+
+			// ensure cannot migrate again.
+			assert_noop!(
+				Staking::migrate_currency(RuntimeOrigin::signed(1), alice),
+				Error::<Test>::AlreadyMigrated
+			);
+
+			// locked balance is removed
+			assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 0);
+		});
+	}
+
+	#[test]
+	fn migrate_removes_old_lock() {
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			// GIVEN alice who is a nominator with old currency
+			let alice = 300;
+			bond_nominator(alice, 1000, vec![11]);
+			testing_utils::migrate_to_old_currency::<Test>(alice);
+			assert_eq!(asset::staked::<Test>(&alice), 0);
+			assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 1000);
+			let pre_migrate_consumer = System::consumers(&alice);
+			System::reset_events();
+
+			// WHEN alice currency is migrated.
+			assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), alice));
+
+			// THEN
+			// the extra consumer from old code is removed.
+			assert_eq!(System::consumers(&alice), pre_migrate_consumer - 1);
+			// ensure no lock
+			assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 0);
+			// ensure stake and hold are same.
+			assert_eq!(
+				<Staking as StakingInterface>::stake(&alice),
+				Ok(Stake { total: 1000, active: 1000 })
+			);
+			assert_eq!(asset::staked::<Test>(&alice), 1000);
+			// ensure events are emitted.
+			assert_eq!(
+				staking_events_since_last_call(),
+				vec![Event::CurrencyMigrated { stash: alice, force_withdraw: 0 }]
+			);
+
+			// ensure cannot migrate again.
+			assert_noop!(
+				Staking::migrate_currency(RuntimeOrigin::signed(1), alice),
+				Error::<Test>::AlreadyMigrated
+			);
+		});
+	}
+	#[test]
+	fn cannot_hold_all_stake() {
+		// When there are not enough funds to hold all stake, part of the stake is force withdrawn.
+		// At end of the migration, the stake and hold should be same.
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			// GIVEN alice who is a nominator with old currency.
+			let alice = 300;
+			let stake = 1000;
+			bond_nominator(alice, stake, vec![11]);
+			testing_utils::migrate_to_old_currency::<Test>(alice);
+			assert_eq!(asset::staked::<Test>(&alice), 0);
+			assert_eq!(Balances::balance_locked(STAKING_ID, &alice), stake);
+			// ledger has 1000 staked.
+			assert_eq!(
+				<Staking as StakingInterface>::stake(&alice),
+				Ok(Stake { total: stake, active: stake })
+			);
+
+			// Get rid of the extra ED to emulate all their balance including ED is staked.
+			assert_ok!(Balances::transfer_allow_death(
+				RuntimeOrigin::signed(alice),
+				10,
+				ExistentialDeposit::get()
+			));
+
+			let expected_force_withdraw = ExistentialDeposit::get();
+
+			// ledger mutation would fail in this case before migration because of failing hold.
+			assert_noop!(
+				Staking::unbond(RuntimeOrigin::signed(alice), 100),
+				Error::<Test>::NotEnoughFunds
+			);
+
+			// clear events
+			System::reset_events();
+
+			// WHEN alice currency is migrated.
+			assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), alice));
+
+			// THEN
+			let expected_hold = stake - expected_force_withdraw;
+			// ensure no lock
+			assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 0);
+			// ensure stake and hold are same.
+			assert_eq!(
+				<Staking as StakingInterface>::stake(&alice),
+				Ok(Stake { total: expected_hold, active: expected_hold })
+			);
+			assert_eq!(asset::staked::<Test>(&alice), expected_hold);
+			// ensure events are emitted.
+			assert_eq!(
+				staking_events_since_last_call(),
+				vec![Event::CurrencyMigrated {
+					stash: alice,
+					force_withdraw: expected_force_withdraw
+				}]
+			);
+
+			// ensure cannot migrate again.
+			assert_noop!(
+				Staking::migrate_currency(RuntimeOrigin::signed(1), alice),
+				Error::<Test>::AlreadyMigrated
+			);
+
+			// unbond works after migration.
+			assert_ok!(Staking::unbond(RuntimeOrigin::signed(alice), 100));
+		});
+	}
+
+	#[test]
+	fn virtual_staker_consumer_provider_dec() {
+		// Ensure virtual stakers consumer and provider count is decremented.
+		ExtBuilder::default().has_stakers(true).build_and_execute(|| {
+			// 200 virtual bonds
+			bond_virtual_nominator(200, 201, 500, vec![11, 21]);
+
+			// previously the virtual nominator had a provider incremented by the delegation
+			// system as well as a consumer added by this pallet.
+			System::inc_providers(&200);
+			System::inc_consumers(&200).expect("has provider, can consume");
+
+			hypothetically!({
+				// migrate 200
+				assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), 200));
+
+				// ensure account does not exist in system anymore.
+				assert_eq!(System::consumers(&200), 0);
+				assert_eq!(System::providers(&200), 0);
+				assert!(!System::account_exists(&200));
+
+				// ensure cannot migrate again.
+				assert_noop!(
+					Staking::migrate_currency(RuntimeOrigin::signed(1), 200),
+					Error::<Test>::AlreadyMigrated
+				);
+			});
+
+			hypothetically!({
+				// 200 has an erroneously extra provider
+				System::inc_providers(&200);
+
+				// causes migration to fail.
+				assert_noop!(
+					Staking::migrate_currency(RuntimeOrigin::signed(1), 200),
+					Error::<Test>::BadState
+				);
+			});
+
+			// 200 is funded for more than ED by a random account.
+			assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(999), 200, 10));
+
+			// it has an extra provider now.
+			assert_eq!(System::providers(&200), 2);
+
+			// migrate 200
+			assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), 200));
+
+			// 1 provider is left, consumers is 0.
+			assert_eq!(System::providers(&200), 1);
+			assert_eq!(System::consumers(&200), 0);
+
+			// ensure cannot migrate again.
+			assert_noop!(
+				Staking::migrate_currency(RuntimeOrigin::signed(1), 200),
+				Error::<Test>::AlreadyMigrated
+			);
+		});
+	}
+}
diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs
index 56f561679cf..02ccdacb01c 100644
--- a/substrate/frame/staking/src/weights.rs
+++ b/substrate/frame/staking/src/weights.rs
@@ -18,27 +18,25 @@
 //! Autogenerated weights for `pallet_staking`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-09-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-obbyq9g6-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 
 // Executed Command:
-// ./target/production/substrate-node
+// target/production/substrate-node
 // benchmark
 // pallet
-// --chain=dev
 // --steps=50
 // --repeat=20
-// --pallet=pallet_staking
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
 // --extrinsic=*
 // --wasm-execution=compiled
 // --heap-pages=4096
-// --output=./substrate/frame/staking/src/weights.rs
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_staking
+// --chain=dev
 // --header=./substrate/HEADER-APACHE2
+// --output=./substrate/frame/staking/src/weights.rs
 // --template=./substrate/.maintain/frame-weight-template.hbs
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
@@ -83,6 +81,7 @@ pub trait WeightInfo {
 	fn force_apply_min_commission() -> Weight;
 	fn set_min_commission() -> Weight;
 	fn restore_ledger() -> Weight;
+	fn migrate_currency() -> Weight;
 }
 
 /// Weights for `pallet_staking` using the Substrate node and recommended hardware.
@@ -92,18 +91,18 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Payee` (r:0 w:1)
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn bond() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1042`
-		//  Estimated: `4764`
-		// Minimum execution time: 46_504_000 picoseconds.
-		Weight::from_parts(48_459_000, 4764)
+		//  Measured:  `1068`
+		//  Estimated: `4556`
+		// Minimum execution time: 71_854_000 picoseconds.
+		Weight::from_parts(73_408_000, 4556)
 			.saturating_add(T::DbWeight::get().reads(4_u64))
 			.saturating_add(T::DbWeight::get().writes(4_u64))
 	}
@@ -111,20 +110,20 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	fn bond_extra() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1990`
+		//  Measured:  `2049`
 		//  Estimated: `8877`
-		// Minimum execution time: 90_475_000 picoseconds.
-		Weight::from_parts(93_619_000, 8877)
+		// Minimum execution time: 127_442_000 picoseconds.
+		Weight::from_parts(130_845_000, 8877)
 			.saturating_add(T::DbWeight::get().reads(9_u64))
 			.saturating_add(T::DbWeight::get().writes(7_u64))
 	}
@@ -138,22 +137,22 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::CurrentEra` (r:1 w:0)
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	fn unbond() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2195`
+		//  Measured:  `2151`
 		//  Estimated: `8877`
-		// Minimum execution time: 99_335_000 picoseconds.
-		Weight::from_parts(101_440_000, 8877)
+		// Minimum execution time: 105_259_000 picoseconds.
+		Weight::from_parts(107_112_000, 8877)
 			.saturating_add(T::DbWeight::get().reads(12_u64))
-			.saturating_add(T::DbWeight::get().writes(7_u64))
+			.saturating_add(T::DbWeight::get().writes(6_u64))
 	}
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
@@ -161,21 +160,21 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::CurrentEra` (r:1 w:0)
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0)
 	/// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 100]`.
 	fn withdraw_unbonded_update(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1297`
-		//  Estimated: `4764`
-		// Minimum execution time: 50_067_000 picoseconds.
-		Weight::from_parts(52_396_327, 4764)
-			// Standard Error: 1_419
-			.saturating_add(Weight::from_parts(51_406, 0).saturating_mul(s.into()))
+		//  Measured:  `1393`
+		//  Estimated: `4556`
+		// Minimum execution time: 77_158_000 picoseconds.
+		Weight::from_parts(79_140_122, 4556)
+			// Standard Error: 1_688
+			.saturating_add(Weight::from_parts(62_663, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(6_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
@@ -187,10 +186,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
 	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Nominators` (r:1 w:1)
@@ -210,14 +209,14 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `s` is `[0, 100]`.
 	fn withdraw_unbonded_kill(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2196 + s * (4 ±0)`
+		//  Measured:  `2255 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 92_931_000 picoseconds.
-		Weight::from_parts(101_398_156, 6248)
-			// Standard Error: 4_180
-			.saturating_add(Weight::from_parts(1_377_850, 0).saturating_mul(s.into()))
+		// Minimum execution time: 125_396_000 picoseconds.
+		Weight::from_parts(134_915_543, 6248)
+			// Standard Error: 3_660
+			.saturating_add(Weight::from_parts(1_324_736, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(13_u64))
-			.saturating_add(T::DbWeight::get().writes(11_u64))
+			.saturating_add(T::DbWeight::get().writes(12_u64))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -245,10 +244,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn validate() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1372`
+		//  Measured:  `1438`
 		//  Estimated: `4556`
-		// Minimum execution time: 56_291_000 picoseconds.
-		Weight::from_parts(58_372_000, 4556)
+		// Minimum execution time: 68_826_000 picoseconds.
+		Weight::from_parts(71_261_000, 4556)
 			.saturating_add(T::DbWeight::get().reads(11_u64))
 			.saturating_add(T::DbWeight::get().writes(5_u64))
 	}
@@ -261,12 +260,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `k` is `[1, 128]`.
 	fn kick(k: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1815 + k * (572 ±0)`
+		//  Measured:  `1848 + k * (572 ±0)`
 		//  Estimated: `4556 + k * (3033 ±0)`
-		// Minimum execution time: 36_218_000 picoseconds.
-		Weight::from_parts(38_811_308, 4556)
-			// Standard Error: 8_352
-			.saturating_add(Weight::from_parts(6_527_398, 0).saturating_mul(k.into()))
+		// Minimum execution time: 46_082_000 picoseconds.
+		Weight::from_parts(49_541_374, 4556)
+			// Standard Error: 7_218
+			.saturating_add(Weight::from_parts(7_281_079, 0).saturating_mul(k.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into())))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into())))
@@ -297,12 +296,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `n` is `[1, 16]`.
 	fn nominate(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1866 + n * (102 ±0)`
+		//  Measured:  `1932 + n * (102 ±0)`
 		//  Estimated: `6248 + n * (2520 ±0)`
-		// Minimum execution time: 68_607_000 picoseconds.
-		Weight::from_parts(66_831_185, 6248)
-			// Standard Error: 14_014
-			.saturating_add(Weight::from_parts(4_031_635, 0).saturating_mul(n.into()))
+		// Minimum execution time: 83_854_000 picoseconds.
+		Weight::from_parts(81_387_241, 6248)
+			// Standard Error: 16_811
+			.saturating_add(Weight::from_parts(4_900_554, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(12_u64))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into())))
 			.saturating_add(T::DbWeight::get().writes(6_u64))
@@ -326,10 +325,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn chill() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1816`
+		//  Measured:  `1882`
 		//  Estimated: `6248`
-		// Minimum execution time: 60_088_000 picoseconds.
-		Weight::from_parts(62_471_000, 6248)
+		// Minimum execution time: 73_939_000 picoseconds.
+		Weight::from_parts(75_639_000, 6248)
 			.saturating_add(T::DbWeight::get().reads(9_u64))
 			.saturating_add(T::DbWeight::get().writes(6_u64))
 	}
@@ -341,10 +340,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn set_payee() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `902`
+		//  Measured:  `935`
 		//  Estimated: `4556`
-		// Minimum execution time: 19_777_000 picoseconds.
-		Weight::from_parts(20_690_000, 4556)
+		// Minimum execution time: 24_592_000 picoseconds.
+		Weight::from_parts(25_092_000, 4556)
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -356,10 +355,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn update_payee() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `969`
+		//  Measured:  `1002`
 		//  Estimated: `4556`
-		// Minimum execution time: 23_705_000 picoseconds.
-		Weight::from_parts(24_409_000, 4556)
+		// Minimum execution time: 29_735_000 picoseconds.
+		Weight::from_parts(30_546_000, 4556)
 			.saturating_add(T::DbWeight::get().reads(3_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -369,10 +368,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	fn set_controller() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `902`
+		//  Measured:  `935`
 		//  Estimated: `8122`
-		// Minimum execution time: 23_479_000 picoseconds.
-		Weight::from_parts(24_502_000, 8122)
+		// Minimum execution time: 28_728_000 picoseconds.
+		Weight::from_parts(29_709_000, 8122)
 			.saturating_add(T::DbWeight::get().reads(3_u64))
 			.saturating_add(T::DbWeight::get().writes(3_u64))
 	}
@@ -382,8 +381,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_675_000 picoseconds.
-		Weight::from_parts(2_802_000, 0)
+		// Minimum execution time: 2_519_000 picoseconds.
+		Weight::from_parts(2_673_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::ForceEra` (r:0 w:1)
@@ -392,8 +391,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_067_000 picoseconds.
-		Weight::from_parts(7_413_000, 0)
+		// Minimum execution time: 8_050_000 picoseconds.
+		Weight::from_parts(8_268_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::ForceEra` (r:0 w:1)
@@ -402,8 +401,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 6_977_000 picoseconds.
-		Weight::from_parts(7_353_000, 0)
+		// Minimum execution time: 8_131_000 picoseconds.
+		Weight::from_parts(8_349_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::ForceEra` (r:0 w:1)
@@ -412,8 +411,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_071_000 picoseconds.
-		Weight::from_parts(7_463_000, 0)
+		// Minimum execution time: 8_104_000 picoseconds.
+		Weight::from_parts(8_317_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::Invulnerables` (r:0 w:1)
@@ -423,10 +422,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_833_000 picoseconds.
-		Weight::from_parts(3_328_130, 0)
-			// Standard Error: 30
-			.saturating_add(Weight::from_parts(10_058, 0).saturating_mul(v.into()))
+		// Minimum execution time: 2_669_000 picoseconds.
+		Weight::from_parts(3_013_436, 0)
+			// Standard Error: 31
+			.saturating_add(Weight::from_parts(10_704, 0).saturating_mul(v.into()))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::Ledger` (r:11800 w:11800)
@@ -438,12 +437,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `i` is `[0, 5900]`.
 	fn deprecate_controller_batch(i: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1746 + i * (229 ±0)`
+		//  Measured:  `1779 + i * (229 ±0)`
 		//  Estimated: `990 + i * (7132 ±0)`
-		// Minimum execution time: 5_300_000 picoseconds.
-		Weight::from_parts(5_437_000, 990)
-			// Standard Error: 66_261
-			.saturating_add(Weight::from_parts(30_172_457, 0).saturating_mul(i.into()))
+		// Minimum execution time: 5_101_000 picoseconds.
+		Weight::from_parts(5_368_000, 990)
+			// Standard Error: 75_180
+			.saturating_add(Weight::from_parts(33_781_643, 0).saturating_mul(i.into()))
 			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(i.into())))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into())))
 			.saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into()))
@@ -454,10 +453,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `System::Account` (r:1 w:1)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
@@ -479,14 +478,14 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `s` is `[0, 100]`.
 	fn force_unstake(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2196 + s * (4 ±0)`
+		//  Measured:  `2255 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 87_677_000 picoseconds.
-		Weight::from_parts(96_386_462, 6248)
-			// Standard Error: 3_717
-			.saturating_add(Weight::from_parts(1_370_585, 0).saturating_mul(s.into()))
+		// Minimum execution time: 119_955_000 picoseconds.
+		Weight::from_parts(128_392_032, 6248)
+			// Standard Error: 3_773
+			.saturating_add(Weight::from_parts(1_302_488, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(13_u64))
-			.saturating_add(T::DbWeight::get().writes(12_u64))
+			.saturating_add(T::DbWeight::get().writes(13_u64))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -495,12 +494,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `s` is `[1, 1000]`.
 	fn cancel_deferred_slash(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66672`
-		//  Estimated: `70137`
-		// Minimum execution time: 105_086_000 picoseconds.
-		Weight::from_parts(1_167_895_222, 70137)
-			// Standard Error: 77_022
-			.saturating_add(Weight::from_parts(6_487_305, 0).saturating_mul(s.into()))
+		//  Measured:  `66705`
+		//  Estimated: `70170`
+		// Minimum execution time: 139_290_000 picoseconds.
+		Weight::from_parts(959_667_494, 70170)
+			// Standard Error: 56_271
+			.saturating_add(Weight::from_parts(4_798_293, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -518,12 +517,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::ErasValidatorReward` (r:1 w:0)
 	/// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:257 w:257)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:257 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:257 w:257)
-	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:257 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:257 w:257)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::ErasStakersPaged` (r:1 w:0)
 	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Staking::ErasRewardPoints` (r:1 w:0)
@@ -532,29 +529,31 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Payee` (r:257 w:0)
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:257 w:257)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// The range of component `n` is `[0, 256]`.
 	fn payout_stakers_alive_staked(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `33297 + n * (377 ±0)`
-		//  Estimated: `30944 + n * (3774 ±3)`
-		// Minimum execution time: 154_210_000 picoseconds.
-		Weight::from_parts(192_836_012, 30944)
-			// Standard Error: 40_441
-			.saturating_add(Weight::from_parts(47_646_642, 0).saturating_mul(n.into()))
+		//  Measured:  `33283 + n * (370 ±0)`
+		//  Estimated: `30958 + n * (3566 ±0)`
+		// Minimum execution time: 193_068_000 picoseconds.
+		Weight::from_parts(252_762_568, 30958)
+			// Standard Error: 22_743
+			.saturating_add(Weight::from_parts(81_185_306, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(14_u64))
 			.saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into())))
 			.saturating_add(T::DbWeight::get().writes(4_u64))
 			.saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into())))
-			.saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into()))
+			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into()))
 	}
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:0)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
@@ -562,25 +561,25 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `l` is `[1, 32]`.
 	fn rebond(l: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1991 + l * (7 ±0)`
+		//  Measured:  `1947 + l * (7 ±0)`
 		//  Estimated: `8877`
-		// Minimum execution time: 88_337_000 picoseconds.
-		Weight::from_parts(91_391_254, 8877)
-			// Standard Error: 4_485
-			.saturating_add(Weight::from_parts(103_443, 0).saturating_mul(l.into()))
+		// Minimum execution time: 91_151_000 picoseconds.
+		Weight::from_parts(93_596_096, 8877)
+			// Standard Error: 5_313
+			.saturating_add(Weight::from_parts(124_684, 0).saturating_mul(l.into()))
 			.saturating_add(T::DbWeight::get().reads(9_u64))
-			.saturating_add(T::DbWeight::get().writes(7_u64))
+			.saturating_add(T::DbWeight::get().writes(6_u64))
 	}
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
 	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Nominators` (r:1 w:1)
@@ -600,14 +599,14 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `s` is `[1, 100]`.
 	fn reap_stash(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2196 + s * (4 ±0)`
+		//  Measured:  `2255 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 98_014_000 picoseconds.
-		Weight::from_parts(102_537_670, 6248)
-			// Standard Error: 3_324
-			.saturating_add(Weight::from_parts(1_353_142, 0).saturating_mul(s.into()))
+		// Minimum execution time: 133_214_000 picoseconds.
+		Weight::from_parts(137_290_527, 6248)
+			// Standard Error: 4_153
+			.saturating_add(Weight::from_parts(1_291_007, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(12_u64))
-			.saturating_add(T::DbWeight::get().writes(11_u64))
+			.saturating_add(T::DbWeight::get().writes(12_u64))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -651,12 +650,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0 + n * (720 ±0) + v * (3598 ±0)`
 		//  Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 608_575_000 picoseconds.
-		Weight::from_parts(613_663_000, 512390)
-			// Standard Error: 2_286_521
-			.saturating_add(Weight::from_parts(72_108_001, 0).saturating_mul(v.into()))
-			// Standard Error: 227_839
-			.saturating_add(Weight::from_parts(20_314_085, 0).saturating_mul(n.into()))
+		// Minimum execution time: 692_301_000 picoseconds.
+		Weight::from_parts(708_732_000, 512390)
+			// Standard Error: 2_117_299
+			.saturating_add(Weight::from_parts(70_087_600, 0).saturating_mul(v.into()))
+			// Standard Error: 210_977
+			.saturating_add(Weight::from_parts(22_953_405, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(206_u64))
 			.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into())))
 			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into())))
@@ -685,14 +684,14 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `n` is `[500, 1000]`.
 	fn get_npos_voters(v: u32, n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `3175 + n * (911 ±0) + v * (395 ±0)`
+		//  Measured:  `3241 + n * (911 ±0) + v * (395 ±0)`
 		//  Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 37_173_756_000 picoseconds.
-		Weight::from_parts(37_488_937_000, 512390)
-			// Standard Error: 467_413
-			.saturating_add(Weight::from_parts(8_086_367, 0).saturating_mul(v.into()))
-			// Standard Error: 467_413
-			.saturating_add(Weight::from_parts(3_108_193, 0).saturating_mul(n.into()))
+		// Minimum execution time: 43_708_472_000 picoseconds.
+		Weight::from_parts(44_048_436_000, 512390)
+			// Standard Error: 493_244
+			.saturating_add(Weight::from_parts(6_697_278, 0).saturating_mul(v.into()))
+			// Standard Error: 493_244
+			.saturating_add(Weight::from_parts(4_559_779, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(201_u64))
 			.saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into())))
 			.saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into())))
@@ -707,12 +706,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `v` is `[500, 1000]`.
 	fn get_npos_targets(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `979 + v * (50 ±0)`
+		//  Measured:  `1012 + v * (50 ±0)`
 		//  Estimated: `3510 + v * (2520 ±0)`
-		// Minimum execution time: 2_641_258_000 picoseconds.
-		Weight::from_parts(382_882_595, 3510)
-			// Standard Error: 11_991
-			.saturating_add(Weight::from_parts(4_695_820, 0).saturating_mul(v.into()))
+		// Minimum execution time: 2_917_165_000 picoseconds.
+		Weight::from_parts(2_948_999_000, 3510)
+			// Standard Error: 33_372
+			.saturating_add(Weight::from_parts(2_126_909, 0).saturating_mul(v.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into())))
 			.saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into()))
@@ -735,8 +734,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 5_753_000 picoseconds.
-		Weight::from_parts(6_529_000, 0)
+		// Minimum execution time: 4_748_000 picoseconds.
+		Weight::from_parts(5_052_000, 0)
 			.saturating_add(T::DbWeight::get().writes(7_u64))
 	}
 	/// Storage: `Staking::MinCommission` (r:0 w:1)
@@ -757,8 +756,8 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 5_212_000 picoseconds.
-		Weight::from_parts(5_451_000, 0)
+		// Minimum execution time: 4_316_000 picoseconds.
+		Weight::from_parts(4_526_000, 0)
 			.saturating_add(T::DbWeight::get().writes(7_u64))
 	}
 	/// Storage: `Staking::Bonded` (r:1 w:0)
@@ -785,10 +784,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn chill_other() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1939`
+		//  Measured:  `2005`
 		//  Estimated: `6248`
-		// Minimum execution time: 73_000_000 picoseconds.
-		Weight::from_parts(75_184_000, 6248)
+		// Minimum execution time: 87_374_000 picoseconds.
+		Weight::from_parts(89_848_000, 6248)
 			.saturating_add(T::DbWeight::get().reads(12_u64))
 			.saturating_add(T::DbWeight::get().writes(6_u64))
 	}
@@ -798,10 +797,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	fn force_apply_min_commission() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `691`
+		//  Measured:  `724`
 		//  Estimated: `3510`
-		// Minimum execution time: 13_056_000 picoseconds.
-		Weight::from_parts(13_517_000, 3510)
+		// Minimum execution time: 15_529_000 picoseconds.
+		Weight::from_parts(16_094_000, 3510)
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -811,28 +810,51 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_201_000 picoseconds.
-		Weight::from_parts(3_442_000, 0)
+		// Minimum execution time: 2_533_000 picoseconds.
+		Weight::from_parts(2_817_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:1 w:1)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:0)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:0)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	fn restore_ledger() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1110`
+		//  Estimated: `4764`
+		// Minimum execution time: 50_105_000 picoseconds.
+		Weight::from_parts(50_966_000, 4764)
+			.saturating_add(T::DbWeight::get().reads(6_u64))
+			.saturating_add(T::DbWeight::get().writes(2_u64))
+	}
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:1 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:1 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `Balances::Freezes` (r:1 w:0)
 	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
-	fn restore_ledger() -> Weight {
+	fn migrate_currency() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1047`
+		//  Measured:  `1246`
 		//  Estimated: `4764`
-		// Minimum execution time: 44_671_000 picoseconds.
-		Weight::from_parts(45_611_000, 4764)
-			.saturating_add(T::DbWeight::get().reads(5_u64))
-			.saturating_add(T::DbWeight::get().writes(4_u64))
+		// Minimum execution time: 94_054_000 picoseconds.
+		Weight::from_parts(96_272_000, 4764)
+			.saturating_add(T::DbWeight::get().reads(6_u64))
+			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
 }
 
@@ -842,18 +864,18 @@ impl WeightInfo for () {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Payee` (r:0 w:1)
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn bond() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1042`
-		//  Estimated: `4764`
-		// Minimum execution time: 46_504_000 picoseconds.
-		Weight::from_parts(48_459_000, 4764)
+		//  Measured:  `1068`
+		//  Estimated: `4556`
+		// Minimum execution time: 71_854_000 picoseconds.
+		Weight::from_parts(73_408_000, 4556)
 			.saturating_add(RocksDbWeight::get().reads(4_u64))
 			.saturating_add(RocksDbWeight::get().writes(4_u64))
 	}
@@ -861,20 +883,20 @@ impl WeightInfo for () {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	fn bond_extra() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1990`
+		//  Measured:  `2049`
 		//  Estimated: `8877`
-		// Minimum execution time: 90_475_000 picoseconds.
-		Weight::from_parts(93_619_000, 8877)
+		// Minimum execution time: 127_442_000 picoseconds.
+		Weight::from_parts(130_845_000, 8877)
 			.saturating_add(RocksDbWeight::get().reads(9_u64))
 			.saturating_add(RocksDbWeight::get().writes(7_u64))
 	}
@@ -888,22 +910,22 @@ impl WeightInfo for () {
 	/// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::CurrentEra` (r:1 w:0)
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
 	/// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`)
 	fn unbond() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2195`
+		//  Measured:  `2151`
 		//  Estimated: `8877`
-		// Minimum execution time: 99_335_000 picoseconds.
-		Weight::from_parts(101_440_000, 8877)
+		// Minimum execution time: 105_259_000 picoseconds.
+		Weight::from_parts(107_112_000, 8877)
 			.saturating_add(RocksDbWeight::get().reads(12_u64))
-			.saturating_add(RocksDbWeight::get().writes(7_u64))
+			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
@@ -911,21 +933,21 @@ impl WeightInfo for () {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::CurrentEra` (r:1 w:0)
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0)
 	/// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 100]`.
 	fn withdraw_unbonded_update(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1297`
-		//  Estimated: `4764`
-		// Minimum execution time: 50_067_000 picoseconds.
-		Weight::from_parts(52_396_327, 4764)
-			// Standard Error: 1_419
-			.saturating_add(Weight::from_parts(51_406, 0).saturating_mul(s.into()))
+		//  Measured:  `1393`
+		//  Estimated: `4556`
+		// Minimum execution time: 77_158_000 picoseconds.
+		Weight::from_parts(79_140_122, 4556)
+			// Standard Error: 1_688
+			.saturating_add(Weight::from_parts(62_663, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(6_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
@@ -937,10 +959,10 @@ impl WeightInfo for () {
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
 	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Nominators` (r:1 w:1)
@@ -960,14 +982,14 @@ impl WeightInfo for () {
 	/// The range of component `s` is `[0, 100]`.
 	fn withdraw_unbonded_kill(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2196 + s * (4 ±0)`
+		//  Measured:  `2255 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 92_931_000 picoseconds.
-		Weight::from_parts(101_398_156, 6248)
-			// Standard Error: 4_180
-			.saturating_add(Weight::from_parts(1_377_850, 0).saturating_mul(s.into()))
+		// Minimum execution time: 125_396_000 picoseconds.
+		Weight::from_parts(134_915_543, 6248)
+			// Standard Error: 3_660
+			.saturating_add(Weight::from_parts(1_324_736, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(13_u64))
-			.saturating_add(RocksDbWeight::get().writes(11_u64))
+			.saturating_add(RocksDbWeight::get().writes(12_u64))
 			.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -995,10 +1017,10 @@ impl WeightInfo for () {
 	/// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn validate() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1372`
+		//  Measured:  `1438`
 		//  Estimated: `4556`
-		// Minimum execution time: 56_291_000 picoseconds.
-		Weight::from_parts(58_372_000, 4556)
+		// Minimum execution time: 68_826_000 picoseconds.
+		Weight::from_parts(71_261_000, 4556)
 			.saturating_add(RocksDbWeight::get().reads(11_u64))
 			.saturating_add(RocksDbWeight::get().writes(5_u64))
 	}
@@ -1011,12 +1033,12 @@ impl WeightInfo for () {
 	/// The range of component `k` is `[1, 128]`.
 	fn kick(k: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1815 + k * (572 ±0)`
+		//  Measured:  `1848 + k * (572 ±0)`
 		//  Estimated: `4556 + k * (3033 ±0)`
-		// Minimum execution time: 36_218_000 picoseconds.
-		Weight::from_parts(38_811_308, 4556)
-			// Standard Error: 8_352
-			.saturating_add(Weight::from_parts(6_527_398, 0).saturating_mul(k.into()))
+		// Minimum execution time: 46_082_000 picoseconds.
+		Weight::from_parts(49_541_374, 4556)
+			// Standard Error: 7_218
+			.saturating_add(Weight::from_parts(7_281_079, 0).saturating_mul(k.into()))
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into())))
 			.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into())))
@@ -1047,12 +1069,12 @@ impl WeightInfo for () {
 	/// The range of component `n` is `[1, 16]`.
 	fn nominate(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1866 + n * (102 ±0)`
+		//  Measured:  `1932 + n * (102 ±0)`
 		//  Estimated: `6248 + n * (2520 ±0)`
-		// Minimum execution time: 68_607_000 picoseconds.
-		Weight::from_parts(66_831_185, 6248)
-			// Standard Error: 14_014
-			.saturating_add(Weight::from_parts(4_031_635, 0).saturating_mul(n.into()))
+		// Minimum execution time: 83_854_000 picoseconds.
+		Weight::from_parts(81_387_241, 6248)
+			// Standard Error: 16_811
+			.saturating_add(Weight::from_parts(4_900_554, 0).saturating_mul(n.into()))
 			.saturating_add(RocksDbWeight::get().reads(12_u64))
 			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into())))
 			.saturating_add(RocksDbWeight::get().writes(6_u64))
@@ -1076,10 +1098,10 @@ impl WeightInfo for () {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn chill() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1816`
+		//  Measured:  `1882`
 		//  Estimated: `6248`
-		// Minimum execution time: 60_088_000 picoseconds.
-		Weight::from_parts(62_471_000, 6248)
+		// Minimum execution time: 73_939_000 picoseconds.
+		Weight::from_parts(75_639_000, 6248)
 			.saturating_add(RocksDbWeight::get().reads(9_u64))
 			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
@@ -1091,10 +1113,10 @@ impl WeightInfo for () {
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn set_payee() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `902`
+		//  Measured:  `935`
 		//  Estimated: `4556`
-		// Minimum execution time: 19_777_000 picoseconds.
-		Weight::from_parts(20_690_000, 4556)
+		// Minimum execution time: 24_592_000 picoseconds.
+		Weight::from_parts(25_092_000, 4556)
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -1106,10 +1128,10 @@ impl WeightInfo for () {
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
 	fn update_payee() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `969`
+		//  Measured:  `1002`
 		//  Estimated: `4556`
-		// Minimum execution time: 23_705_000 picoseconds.
-		Weight::from_parts(24_409_000, 4556)
+		// Minimum execution time: 29_735_000 picoseconds.
+		Weight::from_parts(30_546_000, 4556)
 			.saturating_add(RocksDbWeight::get().reads(3_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -1119,10 +1141,10 @@ impl WeightInfo for () {
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	fn set_controller() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `902`
+		//  Measured:  `935`
 		//  Estimated: `8122`
-		// Minimum execution time: 23_479_000 picoseconds.
-		Weight::from_parts(24_502_000, 8122)
+		// Minimum execution time: 28_728_000 picoseconds.
+		Weight::from_parts(29_709_000, 8122)
 			.saturating_add(RocksDbWeight::get().reads(3_u64))
 			.saturating_add(RocksDbWeight::get().writes(3_u64))
 	}
@@ -1132,8 +1154,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_675_000 picoseconds.
-		Weight::from_parts(2_802_000, 0)
+		// Minimum execution time: 2_519_000 picoseconds.
+		Weight::from_parts(2_673_000, 0)
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::ForceEra` (r:0 w:1)
@@ -1142,8 +1164,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_067_000 picoseconds.
-		Weight::from_parts(7_413_000, 0)
+		// Minimum execution time: 8_050_000 picoseconds.
+		Weight::from_parts(8_268_000, 0)
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::ForceEra` (r:0 w:1)
@@ -1152,8 +1174,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 6_977_000 picoseconds.
-		Weight::from_parts(7_353_000, 0)
+		// Minimum execution time: 8_131_000 picoseconds.
+		Weight::from_parts(8_349_000, 0)
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::ForceEra` (r:0 w:1)
@@ -1162,8 +1184,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_071_000 picoseconds.
-		Weight::from_parts(7_463_000, 0)
+		// Minimum execution time: 8_104_000 picoseconds.
+		Weight::from_parts(8_317_000, 0)
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::Invulnerables` (r:0 w:1)
@@ -1173,10 +1195,10 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_833_000 picoseconds.
-		Weight::from_parts(3_328_130, 0)
-			// Standard Error: 30
-			.saturating_add(Weight::from_parts(10_058, 0).saturating_mul(v.into()))
+		// Minimum execution time: 2_669_000 picoseconds.
+		Weight::from_parts(3_013_436, 0)
+			// Standard Error: 31
+			.saturating_add(Weight::from_parts(10_704, 0).saturating_mul(v.into()))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Staking::Ledger` (r:11800 w:11800)
@@ -1188,12 +1210,12 @@ impl WeightInfo for () {
 	/// The range of component `i` is `[0, 5900]`.
 	fn deprecate_controller_batch(i: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1746 + i * (229 ±0)`
+		//  Measured:  `1779 + i * (229 ±0)`
 		//  Estimated: `990 + i * (7132 ±0)`
-		// Minimum execution time: 5_300_000 picoseconds.
-		Weight::from_parts(5_437_000, 990)
-			// Standard Error: 66_261
-			.saturating_add(Weight::from_parts(30_172_457, 0).saturating_mul(i.into()))
+		// Minimum execution time: 5_101_000 picoseconds.
+		Weight::from_parts(5_368_000, 990)
+			// Standard Error: 75_180
+			.saturating_add(Weight::from_parts(33_781_643, 0).saturating_mul(i.into()))
 			.saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(i.into())))
 			.saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(i.into())))
 			.saturating_add(Weight::from_parts(0, 7132).saturating_mul(i.into()))
@@ -1204,10 +1226,10 @@ impl WeightInfo for () {
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `System::Account` (r:1 w:1)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
@@ -1229,14 +1251,14 @@ impl WeightInfo for () {
 	/// The range of component `s` is `[0, 100]`.
 	fn force_unstake(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2196 + s * (4 ±0)`
+		//  Measured:  `2255 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 87_677_000 picoseconds.
-		Weight::from_parts(96_386_462, 6248)
-			// Standard Error: 3_717
-			.saturating_add(Weight::from_parts(1_370_585, 0).saturating_mul(s.into()))
+		// Minimum execution time: 119_955_000 picoseconds.
+		Weight::from_parts(128_392_032, 6248)
+			// Standard Error: 3_773
+			.saturating_add(Weight::from_parts(1_302_488, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(13_u64))
-			.saturating_add(RocksDbWeight::get().writes(12_u64))
+			.saturating_add(RocksDbWeight::get().writes(13_u64))
 			.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -1245,12 +1267,12 @@ impl WeightInfo for () {
 	/// The range of component `s` is `[1, 1000]`.
 	fn cancel_deferred_slash(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `66672`
-		//  Estimated: `70137`
-		// Minimum execution time: 105_086_000 picoseconds.
-		Weight::from_parts(1_167_895_222, 70137)
-			// Standard Error: 77_022
-			.saturating_add(Weight::from_parts(6_487_305, 0).saturating_mul(s.into()))
+		//  Measured:  `66705`
+		//  Estimated: `70170`
+		// Minimum execution time: 139_290_000 picoseconds.
+		Weight::from_parts(959_667_494, 70170)
+			// Standard Error: 56_271
+			.saturating_add(Weight::from_parts(4_798_293, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -1268,12 +1290,10 @@ impl WeightInfo for () {
 	/// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::ErasValidatorReward` (r:1 w:0)
 	/// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:257 w:257)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:257 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:257 w:257)
-	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:257 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:257 w:257)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::ErasStakersPaged` (r:1 w:0)
 	/// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	/// Storage: `Staking::ErasRewardPoints` (r:1 w:0)
@@ -1282,29 +1302,31 @@ impl WeightInfo for () {
 	/// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Payee` (r:257 w:0)
 	/// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:257 w:257)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// The range of component `n` is `[0, 256]`.
 	fn payout_stakers_alive_staked(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `33297 + n * (377 ±0)`
-		//  Estimated: `30944 + n * (3774 ±3)`
-		// Minimum execution time: 154_210_000 picoseconds.
-		Weight::from_parts(192_836_012, 30944)
-			// Standard Error: 40_441
-			.saturating_add(Weight::from_parts(47_646_642, 0).saturating_mul(n.into()))
+		//  Measured:  `33283 + n * (370 ±0)`
+		//  Estimated: `30958 + n * (3566 ±0)`
+		// Minimum execution time: 193_068_000 picoseconds.
+		Weight::from_parts(252_762_568, 30958)
+			// Standard Error: 22_743
+			.saturating_add(Weight::from_parts(81_185_306, 0).saturating_mul(n.into()))
 			.saturating_add(RocksDbWeight::get().reads(14_u64))
 			.saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into())))
 			.saturating_add(RocksDbWeight::get().writes(4_u64))
 			.saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into())))
-			.saturating_add(Weight::from_parts(0, 3774).saturating_mul(n.into()))
+			.saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into()))
 	}
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:0)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListNodes` (r:3 w:3)
 	/// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`)
 	/// Storage: `VoterList::ListBags` (r:2 w:2)
@@ -1312,25 +1334,25 @@ impl WeightInfo for () {
 	/// The range of component `l` is `[1, 32]`.
 	fn rebond(l: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1991 + l * (7 ±0)`
+		//  Measured:  `1947 + l * (7 ±0)`
 		//  Estimated: `8877`
-		// Minimum execution time: 88_337_000 picoseconds.
-		Weight::from_parts(91_391_254, 8877)
-			// Standard Error: 4_485
-			.saturating_add(Weight::from_parts(103_443, 0).saturating_mul(l.into()))
+		// Minimum execution time: 91_151_000 picoseconds.
+		Weight::from_parts(93_596_096, 8877)
+			// Standard Error: 5_313
+			.saturating_add(Weight::from_parts(124_684, 0).saturating_mul(l.into()))
 			.saturating_add(RocksDbWeight::get().reads(9_u64))
-			.saturating_add(RocksDbWeight::get().writes(7_u64))
+			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
+	/// Storage: `Staking::VirtualStakers` (r:1 w:1)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::SlashingSpans` (r:1 w:1)
 	/// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `Balances::Freezes` (r:1 w:0)
-	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Validators` (r:1 w:0)
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Nominators` (r:1 w:1)
@@ -1350,14 +1372,14 @@ impl WeightInfo for () {
 	/// The range of component `s` is `[1, 100]`.
 	fn reap_stash(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `2196 + s * (4 ±0)`
+		//  Measured:  `2255 + s * (4 ±0)`
 		//  Estimated: `6248 + s * (4 ±0)`
-		// Minimum execution time: 98_014_000 picoseconds.
-		Weight::from_parts(102_537_670, 6248)
-			// Standard Error: 3_324
-			.saturating_add(Weight::from_parts(1_353_142, 0).saturating_mul(s.into()))
+		// Minimum execution time: 133_214_000 picoseconds.
+		Weight::from_parts(137_290_527, 6248)
+			// Standard Error: 4_153
+			.saturating_add(Weight::from_parts(1_291_007, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(12_u64))
-			.saturating_add(RocksDbWeight::get().writes(11_u64))
+			.saturating_add(RocksDbWeight::get().writes(12_u64))
 			.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into())))
 			.saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into()))
 	}
@@ -1401,12 +1423,12 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0 + n * (720 ±0) + v * (3598 ±0)`
 		//  Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 608_575_000 picoseconds.
-		Weight::from_parts(613_663_000, 512390)
-			// Standard Error: 2_286_521
-			.saturating_add(Weight::from_parts(72_108_001, 0).saturating_mul(v.into()))
-			// Standard Error: 227_839
-			.saturating_add(Weight::from_parts(20_314_085, 0).saturating_mul(n.into()))
+		// Minimum execution time: 692_301_000 picoseconds.
+		Weight::from_parts(708_732_000, 512390)
+			// Standard Error: 2_117_299
+			.saturating_add(Weight::from_parts(70_087_600, 0).saturating_mul(v.into()))
+			// Standard Error: 210_977
+			.saturating_add(Weight::from_parts(22_953_405, 0).saturating_mul(n.into()))
 			.saturating_add(RocksDbWeight::get().reads(206_u64))
 			.saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into())))
 			.saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into())))
@@ -1435,14 +1457,14 @@ impl WeightInfo for () {
 	/// The range of component `n` is `[500, 1000]`.
 	fn get_npos_voters(v: u32, n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `3175 + n * (911 ±0) + v * (395 ±0)`
+		//  Measured:  `3241 + n * (911 ±0) + v * (395 ±0)`
 		//  Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)`
-		// Minimum execution time: 37_173_756_000 picoseconds.
-		Weight::from_parts(37_488_937_000, 512390)
-			// Standard Error: 467_413
-			.saturating_add(Weight::from_parts(8_086_367, 0).saturating_mul(v.into()))
-			// Standard Error: 467_413
-			.saturating_add(Weight::from_parts(3_108_193, 0).saturating_mul(n.into()))
+		// Minimum execution time: 43_708_472_000 picoseconds.
+		Weight::from_parts(44_048_436_000, 512390)
+			// Standard Error: 493_244
+			.saturating_add(Weight::from_parts(6_697_278, 0).saturating_mul(v.into()))
+			// Standard Error: 493_244
+			.saturating_add(Weight::from_parts(4_559_779, 0).saturating_mul(n.into()))
 			.saturating_add(RocksDbWeight::get().reads(201_u64))
 			.saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into())))
 			.saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into())))
@@ -1457,12 +1479,12 @@ impl WeightInfo for () {
 	/// The range of component `v` is `[500, 1000]`.
 	fn get_npos_targets(v: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `979 + v * (50 ±0)`
+		//  Measured:  `1012 + v * (50 ±0)`
 		//  Estimated: `3510 + v * (2520 ±0)`
-		// Minimum execution time: 2_641_258_000 picoseconds.
-		Weight::from_parts(382_882_595, 3510)
-			// Standard Error: 11_991
-			.saturating_add(Weight::from_parts(4_695_820, 0).saturating_mul(v.into()))
+		// Minimum execution time: 2_917_165_000 picoseconds.
+		Weight::from_parts(2_948_999_000, 3510)
+			// Standard Error: 33_372
+			.saturating_add(Weight::from_parts(2_126_909, 0).saturating_mul(v.into()))
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into())))
 			.saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into()))
@@ -1485,8 +1507,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 5_753_000 picoseconds.
-		Weight::from_parts(6_529_000, 0)
+		// Minimum execution time: 4_748_000 picoseconds.
+		Weight::from_parts(5_052_000, 0)
 			.saturating_add(RocksDbWeight::get().writes(7_u64))
 	}
 	/// Storage: `Staking::MinCommission` (r:0 w:1)
@@ -1507,8 +1529,8 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 5_212_000 picoseconds.
-		Weight::from_parts(5_451_000, 0)
+		// Minimum execution time: 4_316_000 picoseconds.
+		Weight::from_parts(4_526_000, 0)
 			.saturating_add(RocksDbWeight::get().writes(7_u64))
 	}
 	/// Storage: `Staking::Bonded` (r:1 w:0)
@@ -1535,10 +1557,10 @@ impl WeightInfo for () {
 	/// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn chill_other() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1939`
+		//  Measured:  `2005`
 		//  Estimated: `6248`
-		// Minimum execution time: 73_000_000 picoseconds.
-		Weight::from_parts(75_184_000, 6248)
+		// Minimum execution time: 87_374_000 picoseconds.
+		Weight::from_parts(89_848_000, 6248)
 			.saturating_add(RocksDbWeight::get().reads(12_u64))
 			.saturating_add(RocksDbWeight::get().writes(6_u64))
 	}
@@ -1548,10 +1570,10 @@ impl WeightInfo for () {
 	/// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`)
 	fn force_apply_min_commission() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `691`
+		//  Measured:  `724`
 		//  Estimated: `3510`
-		// Minimum execution time: 13_056_000 picoseconds.
-		Weight::from_parts(13_517_000, 3510)
+		// Minimum execution time: 15_529_000 picoseconds.
+		Weight::from_parts(16_094_000, 3510)
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -1561,27 +1583,50 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_201_000 picoseconds.
-		Weight::from_parts(3_442_000, 0)
+		// Minimum execution time: 2_533_000 picoseconds.
+		Weight::from_parts(2_817_000, 0)
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
-	/// Storage: `Balances::Locks` (r:1 w:1)
-	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
-	/// Storage: `System::Account` (r:1 w:1)
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:0)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:0)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:0)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Bonded` (r:1 w:1)
 	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
 	/// Storage: `Staking::Ledger` (r:1 w:1)
 	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	fn restore_ledger() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `1110`
+		//  Estimated: `4764`
+		// Minimum execution time: 50_105_000 picoseconds.
+		Weight::from_parts(50_966_000, 4764)
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
+	}
+	/// Storage: `Staking::VirtualStakers` (r:1 w:0)
+	/// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Bonded` (r:1 w:0)
+	/// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`)
+	/// Storage: `Staking::Ledger` (r:1 w:0)
+	/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Locks` (r:1 w:1)
+	/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`)
+	/// Storage: `Balances::Holds` (r:1 w:1)
+	/// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(355), added: 2830, mode: `MaxEncodedLen`)
 	/// Storage: `Balances::Freezes` (r:1 w:0)
 	/// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`)
-	fn restore_ledger() -> Weight {
+	fn migrate_currency() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `1047`
+		//  Measured:  `1246`
 		//  Estimated: `4764`
-		// Minimum execution time: 44_671_000 picoseconds.
-		Weight::from_parts(45_611_000, 4764)
-			.saturating_add(RocksDbWeight::get().reads(5_u64))
-			.saturating_add(RocksDbWeight::get().writes(4_u64))
+		// Minimum execution time: 94_054_000 picoseconds.
+		Weight::from_parts(96_272_000, 4764)
+			.saturating_add(RocksDbWeight::get().reads(6_u64))
+			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
 }
\ No newline at end of file
diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs
index 17010a8907f..8e23c6800a9 100644
--- a/substrate/primitives/staking/src/lib.rs
+++ b/substrate/primitives/staking/src/lib.rs
@@ -325,7 +325,7 @@ pub trait StakingUnchecked: StakingInterface {
 	/// Migrate an existing staker to a virtual staker.
 	///
 	/// It would release all funds held by the implementation pallet.
-	fn migrate_to_virtual_staker(who: &Self::AccountId);
+	fn migrate_to_virtual_staker(who: &Self::AccountId) -> DispatchResult;
 
 	/// Book-keep a new bond for `keyless_who` without applying any locks (hence virtual).
 	///
-- 
GitLab


From 4b2febe18c6f2180a31a902433c00c30f8903ef7 Mon Sep 17 00:00:00 2001
From: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Date: Fri, 17 Jan 2025 20:46:28 +0900
Subject: [PATCH 073/116] Make frame crate not use the feature experimental
 (#7177)

We already use it for lots of pallet.

Keeping it feature gated by experimental means we lose the information
of which pallet was using experimental before the migration to frame
crate usage.

We can consider `polkadot-sdk-frame` crate unstable but let's not use
the feature `experimental`.

---------

Co-authored-by: command-bot <>
---
 .../packages/guides/first-pallet/Cargo.toml   |  2 +-
 .../packages/guides/first-runtime/Cargo.toml  |  2 +-
 polkadot/xcm/docs/Cargo.toml                  |  2 +-
 prdoc/pr_7177.prdoc                           | 20 +++++++++++++++++++
 substrate/frame/atomic-swap/Cargo.toml        |  2 +-
 .../frame/examples/frame-crate/Cargo.toml     |  2 +-
 substrate/frame/mixnet/Cargo.toml             |  2 +-
 substrate/frame/multisig/Cargo.toml           |  2 +-
 substrate/frame/node-authorization/Cargo.toml |  2 +-
 substrate/frame/proxy/Cargo.toml              |  2 +-
 substrate/frame/salary/Cargo.toml             |  2 +-
 substrate/frame/src/lib.rs                    |  3 +--
 .../support/test/stg_frame_crate/Cargo.toml   |  2 +-
 13 files changed, 32 insertions(+), 13 deletions(-)
 create mode 100644 prdoc/pr_7177.prdoc

diff --git a/docs/sdk/packages/guides/first-pallet/Cargo.toml b/docs/sdk/packages/guides/first-pallet/Cargo.toml
index a1411580119..e6325c31781 100644
--- a/docs/sdk/packages/guides/first-pallet/Cargo.toml
+++ b/docs/sdk/packages/guides/first-pallet/Cargo.toml
@@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { workspace = true }
 docify = { workspace = true }
-frame = { workspace = true, features = ["experimental", "runtime"] }
+frame = { workspace = true, features = ["runtime"] }
 scale-info = { workspace = true }
 
 [features]
diff --git a/docs/sdk/packages/guides/first-runtime/Cargo.toml b/docs/sdk/packages/guides/first-runtime/Cargo.toml
index 303d5c5e7f5..8ed17dea1b7 100644
--- a/docs/sdk/packages/guides/first-runtime/Cargo.toml
+++ b/docs/sdk/packages/guides/first-runtime/Cargo.toml
@@ -18,7 +18,7 @@ scale-info = { workspace = true }
 serde_json = { workspace = true }
 
 # this is a frame-based runtime, thus importing `frame` with runtime feature enabled.
-frame = { workspace = true, features = ["experimental", "runtime"] }
+frame = { workspace = true, features = ["runtime"] }
 
 # pallets that we want to use
 pallet-balances = { workspace = true }
diff --git a/polkadot/xcm/docs/Cargo.toml b/polkadot/xcm/docs/Cargo.toml
index 6fa7ea9a23a..c3bda50619c 100644
--- a/polkadot/xcm/docs/Cargo.toml
+++ b/polkadot/xcm/docs/Cargo.toml
@@ -18,7 +18,7 @@ xcm-simulator = { workspace = true, default-features = true }
 
 # For building FRAME runtimes
 codec = { workspace = true, default-features = true }
-frame = { features = ["experimental", "runtime"], workspace = true, default-features = true }
+frame = { features = ["runtime"], workspace = true, default-features = true }
 polkadot-parachain-primitives = { workspace = true, default-features = true }
 polkadot-primitives = { workspace = true, default-features = true }
 polkadot-runtime-parachains = { workspace = true, default-features = true }
diff --git a/prdoc/pr_7177.prdoc b/prdoc/pr_7177.prdoc
new file mode 100644
index 00000000000..9ab0be1f20a
--- /dev/null
+++ b/prdoc/pr_7177.prdoc
@@ -0,0 +1,20 @@
+title: Make frame crate not experimental
+doc:
+- audience: Runtime Dev
+  description: |-
+    Frame crate may still be unstable, but it is no longer feature gated by the feature `experimental`.
+crates:
+- name: polkadot-sdk-frame
+  bump: minor
+- name: pallet-salary
+  bump: patch
+- name: pallet-multisig
+  bump: patch
+- name: pallet-proxy
+  bump: patch
+- name: pallet-atomic-swap
+  bump: patch
+- name: pallet-mixnet
+  bump: patch
+- name: pallet-node-authorization
+  bump: patch
diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml
index 785bfee71b6..05a38ded91c 100644
--- a/substrate/frame/atomic-swap/Cargo.toml
+++ b/substrate/frame/atomic-swap/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { workspace = true }
-frame = { workspace = true, features = ["experimental", "runtime"] }
+frame = { workspace = true, features = ["runtime"] }
 scale-info = { features = ["derive"], workspace = true }
 
 [dev-dependencies]
diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml
index f174c6b9054..46db1afc346 100644
--- a/substrate/frame/examples/frame-crate/Cargo.toml
+++ b/substrate/frame/examples/frame-crate/Cargo.toml
@@ -19,7 +19,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 
-frame = { features = ["experimental", "runtime"], workspace = true }
+frame = { features = ["runtime"], workspace = true }
 
 
 [features]
diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml
index 0ae3b3938c6..33bf7146980 100644
--- a/substrate/frame/mixnet/Cargo.toml
+++ b/substrate/frame/mixnet/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive", "max-encoded-len"], workspace = true }
-frame = { workspace = true, features = ["experimental", "runtime"] }
+frame = { workspace = true, features = ["runtime"] }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 serde = { features = ["derive"], workspace = true }
diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml
index 0d175617c9c..e18e14f2626 100644
--- a/substrate/frame/multisig/Cargo.toml
+++ b/substrate/frame/multisig/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { workspace = true }
-frame = { workspace = true, features = ["experimental", "runtime"] }
+frame = { workspace = true, features = ["runtime"] }
 scale-info = { features = ["derive"], workspace = true }
 
 # third party
diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml
index 7e55ad17809..86a78e6e361 100644
--- a/substrate/frame/node-authorization/Cargo.toml
+++ b/substrate/frame/node-authorization/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame = { workspace = true, features = ["experimental", "runtime"] }
+frame = { workspace = true, features = ["runtime"] }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 
diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml
index a36b2c1cb9c..3f2565abac8 100644
--- a/substrate/frame/proxy/Cargo.toml
+++ b/substrate/frame/proxy/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["max-encoded-len"], workspace = true }
-frame = { workspace = true, features = ["experimental", "runtime"] }
+frame = { workspace = true, features = ["runtime"] }
 scale-info = { features = ["derive"], workspace = true }
 
 [dev-dependencies]
diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml
index 626993a0547..84c55b110c8 100644
--- a/substrate/frame/salary/Cargo.toml
+++ b/substrate/frame/salary/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame = { workspace = true, features = ["experimental", "runtime"] }
+frame = { workspace = true, features = ["runtime"] }
 log = { workspace = true }
 pallet-ranked-collective = { optional = true, workspace = true }
 scale-info = { features = ["derive"], workspace = true }
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index e3e58fc01b5..18c7bd12394 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -106,7 +106,7 @@
 //! [dependencies]
 //! codec = { features = ["max-encoded-len"], workspace = true }
 //! scale-info = { features = ["derive"], workspace = true }
-//! frame = { workspace = true, features = ["experimental", "runtime"] }
+//! frame = { workspace = true, features = ["runtime"] }
 //!
 //! [features]
 //! default = ["std"]
@@ -150,7 +150,6 @@
 //! * `runtime::apis` should expose all common runtime APIs that all FRAME-based runtimes need.
 
 #![cfg_attr(not(feature = "std"), no_std)]
-#![cfg(feature = "experimental")]
 
 #[doc(no_inline)]
 pub use frame_support::pallet;
diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml
index f627d29cd56..157361dbd5d 100644
--- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml
+++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame = { features = ["experimental", "runtime"], workspace = true }
+frame = { features = ["runtime"], workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 
 [features]
-- 
GitLab


From d62a90c8c729acd98c7e9a5cab9803b8b211ffc5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= <alex.theissen@me.com>
Date: Fri, 17 Jan 2025 15:36:28 +0100
Subject: [PATCH 074/116] pallet_revive: Bump PolkaVM (#7203)

Update to PolkaVM `0.19`. This version renumbers the opcodes in order to
be in-line with the grey paper. Hopefully, for the last time. This means
that it breaks existing contracts.

---------

Signed-off-by: xermicus <cyrill@parity.io>
Co-authored-by: command-bot <>
Co-authored-by: xermicus <cyrill@parity.io>
---
 Cargo.lock                                    |  91 +++++++++++++++++-
 prdoc/pr_7203.prdoc                           |  13 +++
 substrate/frame/revive/Cargo.toml             |   2 +-
 substrate/frame/revive/fixtures/Cargo.toml    |   2 +-
 .../frame/revive/fixtures/build/_Cargo.toml   |   2 +-
 .../revive/rpc/examples/js/pvm/Errors.polkavm | Bin 7274 -> 7274 bytes
 .../rpc/examples/js/pvm/EventExample.polkavm  | Bin 2615 -> 2615 bytes
 .../rpc/examples/js/pvm/Flipper.polkavm       | Bin 1738 -> 1738 bytes
 .../rpc/examples/js/pvm/FlipperCaller.polkavm | Bin 4584 -> 4584 bytes
 .../rpc/examples/js/pvm/PiggyBank.polkavm     | Bin 5088 -> 5088 bytes
 substrate/frame/revive/uapi/Cargo.toml        |   2 +-
 11 files changed, 105 insertions(+), 7 deletions(-)
 create mode 100644 prdoc/pr_7203.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 42ed88fb0d0..23271617e92 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14857,7 +14857,7 @@ dependencies = [
  "pallet-utility 28.0.0",
  "parity-scale-codec",
  "paste",
- "polkavm 0.18.0",
+ "polkavm 0.19.0",
  "pretty_assertions",
  "rlp 0.6.1",
  "scale-info",
@@ -14946,7 +14946,7 @@ name = "pallet-revive-fixtures"
 version = "0.1.0"
 dependencies = [
  "anyhow",
- "polkavm-linker 0.18.0",
+ "polkavm-linker 0.19.0",
  "sp-core 28.0.0",
  "sp-io 30.0.0",
  "toml 0.8.19",
@@ -15061,7 +15061,7 @@ dependencies = [
  "pallet-revive-proc-macro 0.1.0",
  "parity-scale-codec",
  "paste",
- "polkavm-derive 0.18.0",
+ "polkavm-derive 0.19.0",
  "scale-info",
 ]
 
@@ -19933,6 +19933,19 @@ dependencies = [
  "polkavm-linux-raw 0.18.0",
 ]
 
+[[package]]
+name = "polkavm"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8379bb48ff026aa8ae0645ea45f27920bfd21c82b2e82ed914224bb233d59f83"
+dependencies = [
+ "libc",
+ "log",
+ "polkavm-assembler 0.19.0",
+ "polkavm-common 0.19.0",
+ "polkavm-linux-raw 0.19.0",
+]
+
 [[package]]
 name = "polkavm-assembler"
 version = "0.9.0"
@@ -19960,6 +19973,15 @@ dependencies = [
  "log",
 ]
 
+[[package]]
+name = "polkavm-assembler"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57513b596cf0bafb052dab48e9c168f473c35f7522e17f70cc9f96603012d9b7"
+dependencies = [
+ "log",
+]
+
 [[package]]
 name = "polkavm-common"
 version = "0.9.0"
@@ -19989,6 +20011,16 @@ dependencies = [
  "polkavm-assembler 0.18.0",
 ]
 
+[[package]]
+name = "polkavm-common"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a972bd305ba8cbf0de79951d6d49d2abfad47c277596be5a2c6a0924a163abbd"
+dependencies = [
+ "log",
+ "polkavm-assembler 0.19.0",
+]
+
 [[package]]
 name = "polkavm-derive"
 version = "0.9.1"
@@ -20016,6 +20048,15 @@ dependencies = [
  "polkavm-derive-impl-macro 0.18.0",
 ]
 
+[[package]]
+name = "polkavm-derive"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8d866972a7532d82d05c26b4516563660dd6676d7ab9e64e681d8ef0e29255c"
+dependencies = [
+ "polkavm-derive-impl-macro 0.19.0",
+]
+
 [[package]]
 name = "polkavm-derive-impl"
 version = "0.9.0"
@@ -20052,6 +20093,18 @@ dependencies = [
  "syn 2.0.87",
 ]
 
+[[package]]
+name = "polkavm-derive-impl"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5cffca9d51b21153395a192b65698457687bc51daa41026629895542ccaa65c2"
+dependencies = [
+ "polkavm-common 0.19.0",
+ "proc-macro2 1.0.86",
+ "quote 1.0.37",
+ "syn 2.0.87",
+]
+
 [[package]]
 name = "polkavm-derive-impl-macro"
 version = "0.9.0"
@@ -20082,6 +20135,16 @@ dependencies = [
  "syn 2.0.87",
 ]
 
+[[package]]
+name = "polkavm-derive-impl-macro"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc0dc0cf2e8f4d30874131eccfa36bdabd4a52cfb79c15f8630508abaf06a2a6"
+dependencies = [
+ "polkavm-derive-impl 0.19.0",
+ "syn 2.0.87",
+]
+
 [[package]]
 name = "polkavm-linker"
 version = "0.9.2"
@@ -20128,6 +20191,22 @@ dependencies = [
  "rustc-demangle",
 ]
 
+[[package]]
+name = "polkavm-linker"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "caec2308f1328b5a667da45322c04fad7ff97ad8b36817d18c7635ea4dd6c6f4"
+dependencies = [
+ "dirs",
+ "gimli 0.31.1",
+ "hashbrown 0.14.5",
+ "log",
+ "object 0.36.1",
+ "polkavm-common 0.19.0",
+ "regalloc2 0.9.3",
+ "rustc-demangle",
+]
+
 [[package]]
 name = "polkavm-linux-raw"
 version = "0.9.0"
@@ -20146,6 +20225,12 @@ version = "0.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "23eff02c070c70f31878a3d915e88a914ecf3e153741e2fb572dde28cce20fde"
 
+[[package]]
+name = "polkavm-linux-raw"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "136ae072ab6fa38e584a06d12b1b216cff19f54d5cd202a8f8c5ec2e92e7e4bb"
+
 [[package]]
 name = "polling"
 version = "2.8.0"
diff --git a/prdoc/pr_7203.prdoc b/prdoc/pr_7203.prdoc
new file mode 100644
index 00000000000..96a3d19472e
--- /dev/null
+++ b/prdoc/pr_7203.prdoc
@@ -0,0 +1,13 @@
+title: 'pallet_revive: Bump PolkaVM'
+doc:
+- audience: Runtime Dev
+  description: Update to PolkaVM `0.19`. This version renumbers the opcodes in order
+    to be in-line with the grey paper. Hopefully, for the last time. This means that
+    it breaks existing contracts.
+crates:
+- name: pallet-revive
+  bump: patch
+- name: pallet-revive-fixtures
+  bump: patch
+- name: pallet-revive-uapi
+  bump: patch
diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml
index 1284f5ee894..49a27cfdaab 100644
--- a/substrate/frame/revive/Cargo.toml
+++ b/substrate/frame/revive/Cargo.toml
@@ -25,7 +25,7 @@ hex = { workspace = true }
 impl-trait-for-tuples = { workspace = true }
 log = { workspace = true }
 paste = { workspace = true }
-polkavm = { version = "0.18.0", default-features = false }
+polkavm = { version = "0.19.0", default-features = false }
 rlp = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 serde = { features = [
diff --git a/substrate/frame/revive/fixtures/Cargo.toml b/substrate/frame/revive/fixtures/Cargo.toml
index e17bc88a384..a6f25cc26f3 100644
--- a/substrate/frame/revive/fixtures/Cargo.toml
+++ b/substrate/frame/revive/fixtures/Cargo.toml
@@ -21,7 +21,7 @@ sp-io = { workspace = true, default-features = true, optional = true }
 
 [build-dependencies]
 anyhow = { workspace = true, default-features = true }
-polkavm-linker = { version = "0.18.0" }
+polkavm-linker = { version = "0.19.0" }
 toml = { workspace = true }
 
 [features]
diff --git a/substrate/frame/revive/fixtures/build/_Cargo.toml b/substrate/frame/revive/fixtures/build/_Cargo.toml
index bfb9aaedd6f..483d9775b12 100644
--- a/substrate/frame/revive/fixtures/build/_Cargo.toml
+++ b/substrate/frame/revive/fixtures/build/_Cargo.toml
@@ -14,7 +14,7 @@ edition = "2021"
 [dependencies]
 uapi = { package = 'pallet-revive-uapi', path = "", features = ["unstable-hostfn"], default-features = false }
 common = { package = 'pallet-revive-fixtures-common', path = "" }
-polkavm-derive = { version = "0.18.0" }
+polkavm-derive = { version = "0.19.0" }
 
 [profile.release]
 opt-level = 3
diff --git a/substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm
index 77de4ff3b1b3fe1f378ae31bbba24ddb38cc6300..48de6e0aa0c6cc1604008ba4e65c237dd557675b 100644
GIT binary patch
literal 7274
zcmc&&4R9OPo!^yY?bAxOSBmUi*^VPCn?wnBOo-Cfx#l=GqdZGF%4k*7y0lzkV&nBk
zqF_6Co!xfK%BiIkQd>B9Z4R0VY5kRBxPh6NgzJRDHFC_%af28<O$#38=(*+EGuIw9
zAtCpFl0OnI%|K=tG<tfg_x|s_|NH&FH}cJ|Q&e*qmHp5R^->jej;gYB{|J$h0%&Az
z6SOZutAZAVb|198b*-(t>+3q|cJGZH=vw*L8mp^c`PbfbsG+f;V|RN;tgWuGp}nE5
zb$44s$HBJ!?J(N3Z{NX=y8BxjvKx_+MY@OTS`Rkt?r5vq-~K>DTU$eYp3`LbEM#9}
zqqV-_Kx?ea`ozRnOvaY`sD~((8m27tR(coxUAmhNn%Yd`rguz#ZT`Kv%JN6c<9XYx
z>re{4i-v65*|*t~_Fvd59D5yy9d9|P{5$ghGv8k@TCh-n3cp=w=bqst=R3|4*J0Q8
zRl8Ru+~e-|+;0}~#aH=_!W}|=*AEYI0Vc|REx=L~ed@@zw1MKiYkE3oYrX;lY=GH9
zZ3FO3&nWJ{WuUKD0*se3rF&sw<!K9b%sH=eNG<9)NC%hzig2cpI~hcoSQC9G!%?QM
z=6NZzl*dFki!H)ge*+87sa&O6v{^0oA1kWran9Bk2bih=@<uq+a#doUqXGyvs4&-(
zp9uL0Q6@Jqk^OY5GIuAlhcX@GXMtU{QK)|~%nM<j3-c(<#5-Ar)|&-=gwsa_eGKX2
z83ScTm`pDdW&S?Ebi?)$j-}zGu-?eUJG&{^BY*-nmfnWklu1JQzz=1Ho$}yLIeN#x
ze=ISc0HezS5Ly=RT5$H`C}W`>mH|sTU<*(jOdWGlAFJE~trpGGYVjO>tcWsQcT3k{
zSY`Eh<p*qV&j13HRxYc`0<Zx?D;Lh*R&C<wED$w<d24>W(MtyqOVbZGZl`OjFFp3;
zO9!F<r{6wq*=?1oJ%aR&=UJ)xNa^F^lp|XG42+xdmjFZbjSxfhjUoGmLvIx5hdaN)
zTV8bJ-~dUvU%DbOlMY?x=&;|Q!u~#J3Chi&m%EM=IjOw>9Mc0}19kCZKz$@j9r9<V
zy&?ZUQXyCw@()1kgZ2ot1T<K~oXo8gz60x`i|fvjZTXP@MWC64_B^y_p`C&@3JumU
zujJO1tc7(Yi|b}#?pM%esJ2*J$e*DaybaL5Mm>123Hn#5I&WJ;$e*UXP3_*0{}rl6
zZfeh6v_8U>&0Ln4f3t(8&~b%a3FKT4Qogsk_1hPpK4O-te>(F0!}%y${eypb>aiz*
zkbqUgul<od-dF`fvaqxc51?bvN1z9w8$-}j(APr0KrIPop1P4><|%zd&_|IzrjK)a
z%L4WKX-03M=+`F^=(H{)`gNFW7U<W<Y(&+28HuT~ESXff0Q?u)^=1y_8`4Ls^s$IO
zUK>+mkHm&z<C!TYa|UG;WF)+q)68I}H)|(vv1;o)rNUuqlR(EuK#GK6wHeV7&ik()
zNz98Zfkbbm==6CT_N>94a_m`+J!RNaiajOR<G~&QdwA?A#-1YVabwRa>~Ud_6MH!9
zDa4)v?8(O-2lm*phs7S)1Yr+@Jyz_=!yXIvn2%G~W9q^l`ejO&7bv}Dp33~t?nald
zUcE|`K?inbj%4pXFgh@aZAd~Qo8x+pil4V+*`B8&oX-U;+j1<+^Avb7%&+lcz8v$b
zF<*xHQp}fN-h+7o^E~E@F<*pvH|AGi-i3K5<~ht4V!i<L`IvWL-i~<|^ES*Q%)?$*
z%;#a=f_XFMO_-<M=vPZTHUW=GqyvlthTgP54H%e_(D!UQ;2G}}msu2Jmn7x^7zR|A
zSp@z*KNerSIg7tUITnLi76lOe`d2@cn3rLguqe<u6muLl1WX5&wz7b^RcS8`*y|Lv
zBw%?+=^%&#&b$nm?gdag6qTU54`791X9oZq17!CDwBCTDUC{`<4S+9<vh{#4P@x8;
zjZxaIikhc%5R7jJkQ)_^#>_r{zFn%`2bRd>MXP<LAD=2kOV;$++}zvDlA9CXVB(E?
za+V8`M4|PT1EA<Kt>3?gK00!A^5O_OdOoqyba;rqc;1$oU@j`QsO>2xyxyS0>-(T3
zpmozZMy@mI`YvKd$M^+^T-Qwy+X6!yR$CAqSL1RwxS6W~Mr{QPynmiz&3ZHVi;<;}
z#`SbkSF`p&e8B>hdBLW`%FG#7ZxQIsBnq#e1RkD+HVW-HeXMl0=j?3#KdY01dWKV9
zK<erEnYesBdyL&1U|1^&-vP?+fA`NMK>BC@EHQ6dA+(WLM?#)d{Skzj0>Jr&0}~%U
zXT3F?JFQDNTlT?`RiIoEZc{rP)@=QZPHLO=CQj<Fys#w?hkf2o>Sx4BjlvJT|I<0C
zM=1$?Kkr6f>h{(TBxY~k^)BFx^ipqHKIx@4PyMgNoP*(Oy%c!MFU(7AU1V{^OKn*X
zEUsQ;@p>=ymt1`t_<C?StDtv5bAkin0QW+0Qw1=d4{plQ4o>MHI3@66S*KJ7jvr{O
zFrL>0zTfL_yG4_zH!qvSy=Jh703^#>>;5D$wdQPoB?w`G3kxWW?hdIB1{f07BHWk%
z4k*5C4v@Uq<^SrUw;}1pb&h2*AO1_(-JEIp?6*uy=2-b3f0USurkpue>7yZiBBGzF
zjU9?5W0SF;gAaV-;)wldJh9>2;ZFKuoXLziE>1(*@l@U-`+su+b{I0<g#GQ^K+y?D
z0{dU{Yq#c1Hmz{)g+w}w+?^X><MNvwU6ltEm%**U(KJLdjy}}u4GCo-K?n)%kidlm
zM@T>+0e}{XFm`1AM`EVwoG%2ADUvLIvHJLIaCbq@x0p=Fz%S20dktC|+PNFXi$f%D
zykWeEkCQlYE*B>tK9KyflTa^7F<1^<7eNFAQze;Stv*(zk4GT$>&=`-nK4^z2+~E8
z_q|{n9P+|*6#a&ZcLI(?iy|EU_4^W&p)}a@XcvJ=hvBq>l;j#Llw)Bv7Rs<tiiHv^
zc(5Q~fyY8I7K*Uo#=<HrxUk^F0*8e{EEHfN9}5mF*s;K3!G;Bd1qKUNEaYLqf(0`c
zOjw{v{ew|f!$bGM`8EDp+pS@ppM&}y-r~wtrn~ark21WCS_0^<EAtZbr)7LTU1t2s
zO=8a7r(naUKmU+(D|xZw>Wv~!<kEjj%=;f-@6Nx-h|>VPT%Q+5zXdGb{RtMo$cXdv
zE1%4ZYo7mu#7w|235vHEaRPR)qLLy5Vod?WpnQlyj)x%TK+m=*Dk(J}t0uKZHsUC%
zg@#lZo=p&aXc;p#kh0bj)*{?D*8)Q&S00w<K-s}Vkb+fhhJp_24|VSh6n3+e+ByfR
zCFGq@KrUHC?R&qMn0GGgFnnx7)yxK@w}Qn#VxO62f0<xO^^zWDXNHkp4>eQ>#p1DX
z2nd~e3rDM`Yt_k!dM1=fF`2ta6>2~&nap~YK}LP9D#|1;$2Vl1%K}tn_jbckotLwi
zK?0jTO^QhR^Qa;rJ%i-T&Ah!-?#L89lPjF{<{9eZ5vZZ(pq88^k80CAg>6yh*B|A)
z2XUzJ)7~uGv%vOur~V7_ADASfo~3@)3rVw=6n8A?p_Y$u|L{{FdGaHD&pf3u9t{ba
ztwdu>2RjKUgG1clxB$*KGstLYwPq{R*mBLjW-x)k0S}IIgJ%WE;xnUY5FE2&(vVZL
zaT@E=?5qAijb<y-Shr>`)*Src5b^1QXSu;wh0L^l(WxiRnvK?2&2G{h=1-D5VbNHd
zW=EQX)foHWID(T0U*!fnh0J*-a|j0WG`mG}STzK11`v5a0J8}plO|WuP`+k&XpRES
zR!CTzL}YeI$V`#f5V38s+Y)EZ15c1kgC^`EOJbb^s$|~|P?4~@=X>u-Ovk00BnkI_
z2PmSKvXOd=|62=g=J>TjRI+bB26~D726`pNe~CyS-nj=3AcBi<^(C-@6NZ;9Q&R+0
zuGAC{1R6~#QDwiTl&Z2<Q_561qA9CYd9$XJt8z$F)~Irorf{mPXo^#nYc<8C%Ce@c
zQsq6G;#TE8N>hqd`GBSrt8xPV@T%Mm9a7~<=xnNNK*y@`D0FsJJ`SBjm0yD{UzMMQ
zu0WO3&=sokIp|b)%b_V?k~7emRJfL=m{rg-O|g)`s400Wyc^IIE3sOwlOfgQlJP!p
z`|l)X-G|HZ?9;^i4;B{VJs08RKm1l=-dzB_2U+WKCCE#c{euZkZ}JkcW?wDo@@9~=
z-V}kqdQ%A0S#Pdd)M(IOqO*{+))JaV1zs!ZO>&lI&oYhPbO7{FZ|tKoT{gWrL4nfi
zE#1_j;)4oja}mACpt5w6Il9pt-SHJVz4=)fgPmTZKv(saG)!@N>p5!C06>?=y<if=
z$g^}%CKI~39Np{^-Gqr0(t~GVCn%j&JMnp-$)w;WOkZG$RlrW0U@ggkxU}jpCMuO_
z#+cZwOk0f!zcQU?On8-Pi!mW6(`I9WQ>IPE1X8AH<$^I>saz;DhBqr03XEaDav|Rs
z_9_<~#;~AVup7gia)DK*kTHamDVve3RHk?%xmlSiHj;j2s>n!sl_|H86qKn|Mv_ye
zTuPcV5=coqjmk<Ty~e2Atfb40O23j`ZB%-dbeT~pDCtt8l2g(p%FBXLiPTnsR?;4@
ziwM_jos*a=A3O~D6tICILTp&hY*~137@%7Hx6Kg)ZCwu?SkeCSIqD}pqHUAtj)vC#
z#J9C47)qTKgQ&vN*LPBUL);iX(HHz(U*ze&=xz1U5~p*cEsQFH$Qwk`S}*w|Q#EVD
zVISMiIh}M+3|fNlS?+=s6Z!Zmd?~BN60C@lS=YKT9QE}FePOY}7amWE;b1U{oc&@L
z9<yJY)90!ZH#z%*i9~Q|5T@whiP40(BPd=oX7jps?i8Iq*e#rbqkTSKGLaCIAs5#d
z43GDp6rG1dYkiy6g<)o6cxN<~I1zI04EtQkpwE{|)rj|mD^8TG5Q{-Ed{0VDCj3rd
zdxfcrV9=BZ?hKN%uHiLI_4_yZlAJTSLK+4_$r<iRgkZhPx%lZ9cSRo+*CqWYxqdOU
z!xBCrz7Ya;cDQ`0ljG5p7)^v+Vuhaahrg5>5Kr{&2&TL?=OeJM+m~7|rJ6w=!HUIO
zjD~jh_awxzlVW%PId@)jcq(WM6O2<S@d$f|Eh&!eBG+*_UDvREYY@YA0AjdLGFTt1
z_XWjA6M%J4>=#p^dk70jSB1~DEXQ!l!X-n&6A6M(_%2}gnyKvb_5W*jrFOB-ur3Ds
zD4*me;$K;j3i|p(B1r8-_Sd(3`yPVw@)wA*ltjGY><_FFiE_Yq;2Og03fLqQI_7qn
GsQ&}g)MbzW

literal 7274
zcmc&&4R9OPo!^yY?bAwjR*LLh*^VPCn?wnBOdO@HbIoyXMtPQU9HUiD>(X+GiH+AE
z*<{xada^0wD^4vrShWR5ZF6v$kk(&ZhZ{T-lW>P9Ic&$w95(|)=GuaXIeKon_RO_M
zO-RW7pX85(OEZue292KH>b?Ja@Be=P?~Q!puNbDel*xWrj(Mqy$uL!xt{)&eQUHz4
zZHM+HXjRan(C&qHsIIm3Kz&_%-GM_#j>gyiwH~RfU;EeA)X~t`(0-t;{fJT5*wEHc
z*LuKcXg_9XZ7|w&`0%mzy8BuivK!HnWx5CIT8}jxXgBJ#w)-0lqoF>}X&Urd=)Qy7
zto02?TaU!8PfUEpG`RC#<^iUY8D=c(E_Ofr9kz=Nnhevp=^fLbntx-ivi#2Sc;0U7
zW|T(nq9NNJ{%!uW{S|w;<B;RH<1Gi1Uy=Wx`Tl~@f~5kq?py2Z!cPRn`Hr*Lb=<XQ
z{ekrf_qh8#_nU=c(N%GqR3X*J|E5C-a8dqi0iI#l!QS1IgDBp!u_HEL^A#B21Kdt#
zH-KjvChkAypsyzZ&dZo4dthSiX(w~Sxu^?BFYGwR2Dku<2&R!cIK-HE6MF|IFs85O
zc^R{k$3+B-Eh1Qd2@5j1P^lO0(2M*h3adJt^Yujmt}1}M5dpPaRk-Jv0D=w5&9(F=
zLVrSx$qh_oKV7;i+`%1WOee&7U{`OH>h-WFg+(DOqA(Ya@f=H<B{CwAQHhKpGM*Vk
znGr72!$rBj3~*hreMI0{I4MjTg?Oxsfjt5!VB^`{$jz7(ln?wcX4okY?v$f<{M*M0
z=Lv8`m4MK)eAj|=A4fS06H@_8Hed@d0!*E7G9T;05~~+3vU<@1d!mprU3W{@aad*b
z$MXX=xMu(XN^6(ZRSDRDp|uNVZ>u*6Y!-+b$-FB+-soimh-cYY;~uuQ`qHCMzH|)w
zfB4nomIGF$+9N4npX8P5-jc`VSx2<`CopcxUjYoUH$n`tH-_vL2WgbpSnTVf<poC$
z4v>`lr7H?I<shoShW&#~*xwH=!MHi}a{MHflimZsaoqqmP!~N4)V*2ikUu-^4f+3$
z3Bk&c|1oI&&>n%7fCg*0)46rhw_#m$d0mEXD~9|p0L>J%NodbPdj{GlG+4u(&#f!o
z1nY{I*UiJ+&!Jspj3Y+K|2os)ZGiqYMnBdB{i{r!*Jud&XBcl&n>XY?&(x?*ZMloq
zM}*Rgmlf_`?Vu@aoX{(Qoa;f#_f)rj>t|2*nw9Dwk34idA4RLb|4&an`Xmriuxi9j
zKd{Fet3XH=o+a=AItG0NdH}jH1U&<NE%ZywieMI*8wqBSAtMqQMP!VO3#4U<dGQQK
zS{U}lDFixARK&grlg$$Q;+T!9dJm^?RhAW#DwBZ!b#~G$fP6z_w2F*H$arl<XT>8G
zLlxthSuS%HWe7?oyqPmxU(B1elec)ib&=8GFuh4)<0BwN%CO#y*off$myZ<g1)f4e
zS{Zh7+J-$Fv8N1sHegRF_LN{xG4^<{N5UQvdy2595PRI%vmSd~*yF?=0ejYAPXYGi
zV~+!S?AXI&4{U<4hr=E#_T*uY1$)dV8SF8|v4?$`A?gxCS{9ki3A-CzzIyd4RR#j=
zobS!vy?3;C3fqu^WIo6B0u!IMWZ7P1B7)BaEZcG{tBVYHF)VKMVzCU18?ac4#S$zQ
zW6^^}35y~Yi?CRTMK>1LW6_00Cl&=PuESyh7W1*_z@i<CJQi(OL|BBqtXRy$q6Ld)
zESj*$y3x;9cx(b5lZXI}y@RA_iRm50oPxe<BY<Z-Ca<z6AGxG(_roxty2>K(_xZ8-
z!p&Lyca&o>m}OA{!Ebr>Lxp=8hAE2@n?o_jVMD-lOfyOY=2oq(Bw(-8I*S9A2efvI
zDBvurfaxIswO#9^s2&DbVc6LYz#ai)HGtL|aI|R;Qg|BxUl`@<0b!s*4VuAeZC0%_
zPiv<b-wq%*Y7erQJM7Q5E7ga=61lu+wa@hT&y=7QYx-<%?rm<x&B<?Y@y3HW%Y{fn
zSkiJ76kTP>eFxd@k?yJK5!5~X&^A-&5Ia3>%S>?7#1^$Z#f7&Fvf(ZL&=SzP*c>C*
znRI=en$Zby2_n~Z6V$fA(1!IE#Kt@0Y8SYfs{u}L1q-}yk>Smx8T`e_N=OqrV$9X7
zJy2h;#AKeg5m=cy%aay~%}k;2mMP%jS!kosPO>LT<~z>K*Z-qFCFy4c{duIHiJy(D
zC$q=cy#bE5((oN%{Qh_UKm(+I{tpWGrWHaPjde8SDb*iAm?;39pFcYB;d9no!#QSM
z!P#;cj;sRZiU`}=;IL-vXLM4#tv7K}f8vELML6v9c2fULozy7&u-d0{Qhkhq9?H9s
zm%6?61BE-3cfAYvBE8g`mQQ-A9kc(da2XiB)=Po6{KCA{u4NY2ywuJuz~Y8w7O(eG
zf6Ubz;OoKRtcTtO%?S=j0NmGsn<{|ud~j2aHgHPEz$t+j%Q~exaQr}Hh4H*5@cmxD
zaf>EVZ(cQtd(2=D0Z5j2)%{-KYR%dFN|M5o5SCCF-4*K80~`%&5#h^!0Tf?02WVdG
z@_%*N+tBplI>)k{5C5_3Zo#yA_FJYEbFBQ2-znVBOgVF`BBLQP5h2giR&-P(E2b)b
z3O?|O=@EPP!w+rEbjH}}hq=s{V|otKj;HdL+5fXsu)~n)ChYI&0*V+M3G9E(|KP1T
zlkIEVdm)j|B6r7D*tqOwM_1(m#bt17a5M{%Okj`pctcWYNRmR5J0uAq$q|xJNCKc`
zDvZ6a|Bu4Wu{mD|9#f`S{&My4+2D>t&bORQ9|6C75!!3eW}szm7%w_#-gv`!5g(^<
zB9n^~5FcoM8KcxIQVf;>*M$(lz*K4GS4+mK$an-Yzn;t)lo_*C3_-d`^S<Y8eM4S&
zj-p>O@fhGpwJ0LsU%anyuQLYh*&U}a5g48%NK0<SQW=&uV5t;KC0HuPk_Sr?mP9NS
zVW|*HZY-_Gk_$^tED2azhou56<zvZ#B|DaQEZMMxu*6}>ilsa(S+Hcrk_k&Jt$#4e
zYIx{gIKReUYr8e9^K($&Em~Z;%5;DJ`%zA`F)IMwd1X=Ie!q&(r^}4b-z4VT^$cwI
z^yeRPZY3{vT)k1miCp?`g?s<w>)rVm8F3nbm+SN5=r@7IyFS6<7a4JWdgYUOan0m!
z6>b8CX;8eyh!e1TwN6@OK&&Z%7?ck&$ngNg9O!vN>!hUyWYx6R$VMEk)51b349_Nr
zKCFtl8c12|DQgko8=HWk;wv$A0hAp)1T9$gW+>>O{?HFygu-r~(OVZFwS>GA3dj|U
zsD1A@3ir-s0>dY^R-NC9NGn+UBlh!i{3{8bRxgvo{P|%->Y;`Tp^Aqq#vvfYNQ=Pg
zXKMASh<-MdNpqPyX%(7+S~8jSEI;J*=c=My@^XA@*10S}MRrdY92L8q#S9YI$uqQw
zWIvB864EnB&fMJFE9H*Lu;+7yGikoaO!q<!y#Tf3Jbl!g78z`da=-W}=RK%Hji2#m
z*`5cszcc$^kpI9W74<wb-UCUqmlk(C?V*;B2!H)!AbIj5a&VC`IFErO!&YqYC4Dgp
zO5c#sH!gwm&HRuv&<4X+YVc)-ePdq&fdlRv7y8agki}<4Q6D&FVlt4^un7k5GVJUB
zKaF84G<dgRFESir-w^fbedmO}SEbCHec7oe&4!INc*AZo9Oh4wJYg|-n_)+WgEu&P
z-#CJk`(73LVp3+B%XGkCo?*8b4y%FS%>bhB2VgcKWoGDA43ux!9fqU8u&tx4O(8lv
zBxPplYlw>76}LTf&fNP1y)<aTe!8S0*4wGr_W)EhtRDRCdkWWn=_X0SecuL(=%s9=
z-s%75lAAkutq@7}?MH79m0xd<!uc;z3B+Ruv*Mx^Yke_n;Dq5-%QT3jtCa@vK%g;5
zv99_JQlhI~gOuuO#2_1Vb%#O9bTwp<jk;Q85J6XoL7ci;YY>;Nss>rFs|O9@*42K-
zAceYm)F4H=nt(r|u699(bae_ko30K*$Ls1Sbaq`m37tb%UxO}RSD%HhKv!p=Tc@iT
z=yZ6?VGuA$cvnSCI$X;jW*zj*AQl=J4U(tBy8(k(snr@Wj#iT^#(V$mzgD=-AFjr;
zPZRGySXz$vLPSu1`zwWecM0?!WKGmckQY(?eF=dyd8t^lua=0q17uB_BJh_qg+QH2
zbJem&gZ@&Tg`~BX(lioytwfsCEX~1H8q#zW^pG_6Gnu%JG$$BPdeYLxEGs^!a5fhq
zO@mC9ZYoDNnxi|pMn{^Tg)!LaH3oE*w9LSiKw2}*vH^fDk9)x+sF7#spiHK83pu*^
z6}kx%Eu{O-!A?**ciO4X15Ks{H)Z+)PptxW+74@J4kS!=I#Lsr+8mdf*rCl?Qxkq|
zE-y9V)#faz2}zqXrzQk#&Xk%!+8nE0NDWtN7uKbQcW4(1Qp0}jLVjx4t6gxUh9&KS
zJvA(77kF(JrG}6;YfB|7wOKKh+@Z}DrILPawlJ0SYP0TCQqpGEr;>s;>(XX~R03%;
z&QxWkHnTBRxkH;NOI7-{nGLB*uQpSfs+6>ul2oOj%@k`dOQ}kvw@R!w;{m&f2+h_7
zg}d@W4D=~r14D$^u$tNO@ZK=MwEAybpa$By1v;=It!#n$QMYW{F1w?l%|G((>JEm|
zr{y3jxAga)mfw)Kg-`Vdf88H>x<7hbeYDu=+-3`-@*wgCk+R84Kgo2>rf}HD4+u^t
z8<c~VAbeK4pv6Q#zH(pMYOw^%qjc7_c??H=13_O{F877U({eZ%Od{ui9EQj2*XH!O
zs^sm?fnXvLTp5HZHh5|@A@2>!*NoY`u6_Gtrw?`ur{QRy&zDRj<YdSt^asP^1E*!@
z@z5sU_RV3K*%saxO(#x;ocqE)S2F1HrPDR?-Qn_6#cRZJP!8XnmXis;6WCs3syrAp
zC4&2c^sH-m4KoA&?Y^YoOs<iJflzUVyAvT;?{Y4G2IT$GK6!J}e_9xjLwhaZQ}P=j
zU}vw(mp(lnP0P_l$R(G5FYOP1DgBsys()`V?X@`{fqmV+^cE%E4Dtw;FW+J`v~Qq0
zA&;Gw!;c~7zH1Io2W?@BaXKyc@)fqEJhq=+$K`Zg!}hH~4A%jO;XcV=eX!mal=~8Z
zbx<CV)1kX53rSbG&$TMYaM~gyL%~xCick1XVE3A-?DO^iYxbx2^Um=1<X}JJQ`}Vi
zYb(-0-#|zPsh!IH`c`k>O;KL`0#TNdiZ`77fi*H!4){KH4PkZ#Z1Q^OZ{02v^M8Oo
BH1Gfb

diff --git a/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/EventExample.polkavm
index 6dbc5ca8b108c1ad04cc248b735b2d7d4f43f2a4..cea22e46adcad0dc9bf6375e676dd4923c624c89 100644
GIT binary patch
delta 1583
zcmZuxO>7%g5PrK!`qt}EHmx1INof|u&jukdF{N%KkO8T(sZ`l1h#-1!B}=<@E9}C-
z&v}s?zl~%ksYE2(@@rbD5g>WvL{%jYDfH3<5=*%t@WE6Z0;;$n_0R(}Y0T_~3JzUq
zcUEs^zL{@k-fa3deb=8BN=q|J7*EM>g=K_<jm6_Om(0r8d|~UoV{jmc@dP?9X-K?A
zd~aZoX%fa!B-#~F{P#D3+Le!+iz;ucO{(&ns(sn}{Wo&+vaoXV+PZqJaxCdNWfwi^
z8DVy*Q56~+0$ESrUcd78eeLz6=e5VF?rVpWo)L4UC)k#E_GaT!&u$HpAIx@d%5!f5
zHHPj{MWX)s+V8@+(ipl$l?e5R=&;uCN9Sv!;;W~rp}dK!NNfbQfDqM#=?n2dFdk6i
z0V$5nf{X>04YHQPtRP#HSi8feP78M`cnY5k<2e+T5{kUMo5XRZD`sI1(b`qfUjom4
z!Xy1)3iOA<m>I#aM;?H4$7}z)yHmIyl?|w_5cY(T0;<N4+M`MrQU8yK4%?{V-$L_4
zTj}92jv$fh<hUbG2l{7B-3cxW!km5uH4CawLhx*UcK6Tq%9+F?KKaM-$v>XYQKpk}
z;r&2=Y%jqB?T1M`7M4OtG;3y`CYZI2zM~~-5VvG|?9_xZ@(S2}$qW+LmQh@*!aIk2
z*hvQ-FRgGGgH=ctY!CN=E$K}{vLefn__&2WIiHS?-$3#4>xgnN!%^4r$S*rde5%K?
zgtcPKin6r?YftB9-^nfIF6G*tuW{$L)FBeDs-5pJ6u|Atksi(D>G=dbH*K~6hw3Ss
zH4=(?5zvGlFl5=3UHhW{>`A6ivrM8>^!o5tuh(;OWf`Me#4MAqH|v`xq$K`ym!6AK
zBVx8x7c7=gp7~=ti9gzPkc=42_=&(+wDuc5Q(GSxDKW$766kZjUSUSGUP<C_A3BzV
zBWlDLSq2eeqzXb~&amhz(^F=_p$x;9P@W$J^kvt_2m(6GYGg)Y8I|)0A(-<JA^SMj
z2pQm9C8VG8AR&F6D};!=#3e!mQe>GBDJt9omnXReF8jF!E>Ca^Tn4!XE?#bdi_9(X
z@UU!*%mm#K0XM|38^Z5~@VOxlx*-m5C&G}6Jmw-Fagm>Lkq?(#=usDX#3!x(udE9|
zB46h$6RmT;CXwQyq}1MFx|$<Q*K$RshjKGakL3zfk9C@u8c~|nI+g@&<pq^yLU8k%
z&}--BT>wX@TB>ZsplZ~B!P4&joy4UNcbrLM&cTZvIEe4SSTfzIWTOg-3@Bb2c))bS
zX&w=~%t;FpfbC*0cz!O*-J<u7<z+?!60(V8FL}F5a+gT#{4~>}_Uxc-OEzS6L-NUw
z_;o**>nrueq&#|O+k7GA_A;MPUOo*O*xT-;<n7w8_-x74SCQN&0MABY%;$0DPJp@l
zsfm5k)qktCK_c5_11^IBNv?vJW`+eKVUiTkrUpQVuKbn6Ljaw~5nfSQW;(ahTvIzm
z5lRH?az%bg?JV@cvTX!!%~!Y5<w>uEJ`g7d6(mmLAbdP3ER>}ny{OQNL>F?c+<0!1
vE=K4=h*q?CP%UR7W+g0%ESs16_g2Eha}amsLD_&6zqiWV?eNt>&$Gfmk)k}n

delta 1583
zcmZuxPly|36#pi>+ix;i>uj5B(%sq}#XngQ&1|eq7nG<_5(_2SDz4Z=3W1rVWyn{-
zc@qn1^^k0)J1zv=X<F!_h0LZs6dc)VL30RN>cxWx6%VTvZ$b|}Xm`8u&8$Ta9hiB;
z_ul)x_j~VsZ!NqQzV<j*J6jc#bWV6RDG<Wh&4aGZ8}+HGzW(L`JP?v}h8*PMB+!QN
z%M@i1k~B>MZXFf>{mqb1>e^ycvbxgZ2a<I|axa9w_)1v3z%AWqUy<7N1Nq<)*9aD?
z+$qy+aOPD`yHdQheDSrr@t5+!7ayj&8{eA`P8jV0=Q^QNH_Y<`w==4JZ*&K9!8<d=
z9J|9ro<*vy-??eg9P2SL#Ucqd9ycTDYHKneA7!fe3T=>p8C^$0Oo<g)HX6%D#cY(%
zQbQLg2jv)aL~vrT%0stj^Suu3)#)5PoTO(+lFy04(pH{kK@ko83}LNHfk+KKcW@8%
zqXpFONm5W_$)GTT=bqQjH@4>JFws@4E|&}@iHIt5Od2rpC&D5>5H^03n2~i-9a}Gs
zCuxcVm;whqc{Dm)H54znz;W*@7s;Y7g?WO{+Q+y5T&^F>J>XM$7@x|+`RoS;bp5U9
zaArHfUETY6I+f%TBw(~Y8j5p9>*~<{8dJe9*)}^VCr-S8c9%hgY<!7i;|+YzkTCVq
z!6(G(HjcpxCX2RvhtL)VvzTlEN|2p)$f=XX?DTb#oxVnxjb=3MTkiXLGf$5UpbXH-
zfRl#R9CYX9)33{>d|vMMzNEcdd=Gf5A@#naSOC8#`v$C3VJCC!#Jtf#9HtalS<Q*k
zIYi?I$Pmf}xAocZ@x!3ZLn+rYLSec-7!15zp``W=3Z+VOxw&?T&(rs|*oib#Q$|Pf
z!7@4Vi9a^-^xZ8FNzLGlABs+;Tffm`tt%rFHBi+)V4o|^I;iPpJx}}hJxj0&Q!@Zd
zC=vh-6mjr|MUg-$7`jIphc72SJ&EW`zK<G1bSTHQDi0;evQrvR%T8#p!?NQVj99j$
z!LVh=G#IjMQG<Y0vw00T&45xuGepZlmq#rJU3OXyx;$bz=rU?K=n}FVbP+5EJ%Uiq
zXw{e>BI<`2_d`Vd5Me*WZa>5>%Zo7PBTxCr`+VfbeB`|~A9~V<o(S_R|10YPlBhIo
zD5aaWwaRP8o;=^(1Vxeol(=kwl8~#QWMrKwncgyGYMPbfJ%`7(TDrta3A}lW*lX|R
zF?7VL6?8R&Rbwg+R{Zwgd0KmC)0;H#4xarH53-v$mY{f*G#jWWq2jrbd!VRZ^B~aw
z&`S#w!0qBL1ivwaz7hJ?34xl!gk0cVGt@VQKJajI9+b3udeptiyO`B=J}f-o*Z)*#
zwwukoF!{@d@l3(*WhEy*e-tygx#3Sr#clmckJk)kg$P3&@-+3SaL}7O4(IOcWg6x$
z{aY;?QX9I8mqEoOS5VA@>Y&IOP(a#DMd-xEzw&epp>r}=bqPxIa(i)A>KOqn5wepF
z>%7!k7{X<1#(LGpdhzURh$n9cW=BO5n58lN1SMQ3W{jN^S)FGKaz~z)XIV4F780x;
p&&DLZlrri`J^<y4Fuc7IW}d>h?R&b4D}H;G`P<?1-N7fhe*kJcDrNux

diff --git a/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Flipper.polkavm
index 488ee684f0c4aee5d64f8b691d048ebefdd51044..67f11e68f117309169a317d415ba547020c1f568 100644
GIT binary patch
delta 1012
zcmbW0PiPZC6vk&ZsWVwym}0WoR>72l<fxs4jiSbjWbl&If)zaEP~5ebSP(XLgfz2(
zloq78EKP4-q)8M}JoMj1Z=olJ2p&A-Bpy6?EchqBG%eCv6g><(ynXZS{J!t)R^L|N
z;-}2Q4C7=>zsz-n@YDH3qe9*C$&%9=KMMt&lM$4l2x-sq;R^(wlX*^JNNbeg#jo86
z^83F?MrEYrC2*Yi{jVe&m66lZi2IRb$(zP!3esj5Nfv2-v<3RoGAE}+)D%%BqEryq
z(Fs;lrdnaDYN{Gl^=5@OS4gu=#>iDpW)Y`prsH;o2xT$XnMI(%DG%v5LBmMP&^}I*
zC;&=5c(1?r?T-u@;6&NpkaldiXag?hA&!_H#D0IQE!le{BX5v28pw~dP|3Tloga|5
z&7nXFY2#+#u_mXOYsEO#F)n&fw86q20X>X>9!`+b;L=6sS~Pe`6UKxv(!xjyBiYs3
zCp0-DE#^nJ!KHn5gx$I;UAvIvp*AOa02Gx2y<ON?nuQm>XUJ!K-(nA(C1+IGrid>h
z5ubwvc9koTS;t3cHL}q98Y|0m^H2)KKoA_+&E}6}g#+Q_K&6Xz3kxGJj61@xg^~Ra
zwCrzW$(`GpFTZ6-xH})Il&VgsT2j^1IhiXrS81~r$}PjRROVvM_0ZCC4EAI6BXS)$
zEG-zD!Mkw&9qJf!4_r|eZRBa*n-AU`m$rF@?tJ9L_SX!#QMAo}a^rv*y&W&Sb3Mbm
zQ?f6?za`rVqMPg#Q!A=URgI~-S(0ruw~0#*YByJjIJJhvspqh&upS}*c&oJ8ntUNQ
eOu5eF8kMW=hHHBtdIpYmU<mvnL0o)zWa}p^WjaIv

delta 1012
zcmbW0PiPZC6vk&ZsWVwym}0WoR>71)$<cHUHi{ZAlEF(-3s&%uLvYs~)DdB4w_qv=
zDP1bXy`-MJNRxO`EcD-Hy-81c5IlIuNj!M)SWp{Znigp-iXMg?-oE*Ee&6?Yvv0F+
z;dAkU$2b|+FLE6rT${@-*Qi@P={c>5(@@Yk8AVx&koH8y&k=Z*6(=dAEmz^iuiYr}
z`@c#?Wx@-xIL-Y2P_i+ZI3<m=pU8QE#F>h;*?CeznxAZezO>A;6iHJgnMhJWTt_EZ
zO_^$msk*5eR5imI4VOq*CFA5WCo_oC0@HE3NQAPO>&zg~;FQO7oTYK36=@$QITQh<
z9=zAz`)<2P1~^f6Fr*zDc5J}KJjM~zquB3HwI%z2Wa2H7#v9}(TF48oYG()JO>-!c
zN7{rLd8WxJ=2|IEb&Q?hxi(nYBcO*7(8CGx8eH0Ot|X%uG+|5%qacjDFmhe3eL`hV
zTFg&wflK@92)j*p+O;b=9&2-=M?g_I(A$Mg6<FN=Q6yjRU5h<*=AAKRn<Bk{M0yq$
z*j24TW*r}))hI#hYpg8ORZxnhKoA_+&E}6}l>_1229++_4J-^Lj61@xg;Dwsw9;>6
zxt-gZuf7*aygMKHysA&CMou*gndwY5T%lnjmRW>pc{G~}*JAUFDcFzGPsnxPu(V)o
z2JgZ7_o!ox3$7@OE-QK~n2TPYkhXb=?tJ9L*0&<L=Gf*xxpBaZ-i{Z+nV#X@DcP6c
z-;(VF(M@)WsU=mXs=-v#^rqYB{s#6O)DBmO7+FPP<OQrctf$C7-tsnD<(G2Zlxs|`
aQn});yLRxgXW(cDhQJ>Z#m?g+n?C^<Dl!xR

diff --git a/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/FlipperCaller.polkavm
index 38a1098fe3a767aa0af74764bf7247e59f6110b7..29efafd8722db556b949b04c47c88eeec07535a6 100644
GIT binary patch
delta 3011
zcmb_eZ)_Y#6`$R;J#%Y^tXuoMX-vH(m2ryHTDcOO62d{7c=v2YdxxW3KtlfDX7?@;
zn*!y|o|W!y)^Hc6ST4u0=TJf5i~p!XmS|}jLzGX=(X>*9Pw46haUc329}tNsl+><m
z-mc@Mp^<=ylV<nycIM5z-|zR{n`^tS?OI53(KBOG2*<>)ghYh6mnZgB%EZZcji>5E
zUjTy`!h6s@f)M|!blaCOyhqX@tRTLUhYxrD?Lnp18&o2+eZ08H^<+w|;SDOCN3?wb
z(cmm9wO>Nxt@V+h7L02_T??vOFzgs%v4vxX&dfMVs4N+-mPvI6SMxZAC(%AEA)Xq2
zAsh}7MVCluBZ|inGyB}UQcfwQly}S$&7vB`)@Z1OApXZGD`j1HWV@gu{(v|Pk3bxp
z6rbZC+{KJuCzbWqhXNwU!TX4;><-}&(WRC**Q0o6t~kSgHp5Q#vdaa;Dp_QWS`(8q
z<+3taxH?1H@CN)4{FYn{=PagU<QMGn6ih86D@BbSD-Y+@f@v*yN(-7=aJbY_hck{m
zr*+6$hop574UbT2$fb^(|87SdO)Aa>Cq^A}m?or9U?YUZMrO*{d>bpsZc1SZsp?{u
zPyi>byNtB%DHsJ9Syb9lAD^y|yhzJ3btPu>P?s>HkGjrTr}%TB^J$jsWiu+9jkCEh
zo7XLB<*Toe>RiB@26yFxQhhz(PJ6Cn<lS`#!s*kcwz0J+zOoL`0bCc0>q0Q%q(l)&
z>+@g>M&R>Lw?3x{fEf@2T!;V_P9cQ3P#}b4u#{S}A5keS(DqXT4Vpr!Ju8eCzuEA`
z3{Auz@z@%Yg}OkKx??D?UgWu)v7d|fUwFHA?uSCO|98)S<2vSI{mv^lk1q<Fuu?<4
z3t2cB$SJ9^8HvrxY>u$`j|nO#fYNfQuXQw}1+6>IY27-Ej0%r1cQ2mCsG;AJCuiOP
zj=~V!CPN_Lv_#D?OA~6U43N`9IE#GIcD}zF#htKq82AIxS}A6#R)HmSYnmnFn)z53
z(YZ3Bvr~x96d<K;Eqr(p7QV+bBkrWZ6i`46fTORc|1IzdaH8jm8;!xteT13ix56}a
zmX}~_mM7E*GmtujxtW^E+Da66^AM0Pu`rv{*?ioxtuxlNHD7%lSKlPnG<nif*<}n!
z_$mGYM-vJ?*+WnC(xmLn0ARl8KC=Mc-UW@tL3EYk3xQ)_K5|IrV~1q!Cy1KTA)<&)
zfT)#n(~uLYHC^^~>x1f^^<akV1RFJG`LV<T<o_@ZLQa67AHDuz6w9D6g%y8*cV|lG
zIi8x2f+Bi_4XwLJ*eDrDfMW<H;}TS#{Q@&RZUwdb8KD8_1$!Org8~Ru5V;uskQWJe
zq(eC^h=7Z(b)ScPT!1l)sBsQ?nP|KNFE~vFUNSW<A@Gf0XT%OCdTFADbgAcCP0ZAX
z<gSA~qxU867N2CBeC~co9=@&RgwCLDlqEI|=#D8?!J}+K^psn6Pb*M<zyL6enNu97
zfEooTTRKf<ImgIyVgnZ~b_X69`cF!@TL^nL?k$AJ|MJf$Zr`{ggl7-<^uLXK{()Yv
zdjQJtQpb!1NaY%gcVJXtTmqUKeAauP21h5?S@8++F^_T%DCdE4-lLrNDEAr-541o~
z`}s}Ed7zy4C<l8T?1MZ|&U=(UA$+{X`<y~w@fe1-;qg5M3`0pahndH38TftjkHTiB
z*^7%GMDcIea>YN3a)u@1%!LR5b(NLlY&uLq&rm_aek%cGK?;mauoL0xc>?-oC)Mij
z?^UtmYv2S}is8d+&P>h(pDM^!m!4{c?Vef#oW_h{s6&ub>cu>U9wUST84>5e+VdWO
zaT6qa1I9HNb)@)FApjPlf>U@=NgM#hmCQQw!;(2&+W%MHOC>L}4$S={ivREt8<Fh(
zR$G;9?EyRTpqnC)>+ZDV&dbniKtHzHZfk9JBxnzGWZVLQ679}Q?xI}1AXaCI3wlfQ
zw#wO>9pUW(;geHrt<jD&*aJ=WAb9p#n7b&sZ^_jw&9{2kX~EWZ*b&(tc*q{y`N=tU
z<kR**r#+Z?*zWIg=Lu}>z9qRS8LAVM4zECs2JHTT9U=CBXb(1fVSxSiHm_kjvdtc7
zu?Ih8tK08*D9F_cxfMkoTWRYoL|onF#Xafug{2wnw_Csd{b=>*RsjuLe~;qw>Q>tP
zH$6JJ>ZJUNsp`^Kdtv#pRgc|-T5(D7#lTSUXWNMErH2_$=euC$h^tG_yz+h&|7j&x
zytr-0|4V!jR*XEq0$cg|JF#+?{?x}|$D`g?(4W`NZtE`toyFH54<Zl0oDZ#l+{<Yt
zMs}|_X~<}5>bexU{x>Mtu-;$jT`@Sw189XgGk)uWG3Z|~YE)8ayG)_be@ABDmDng@
zr`QXO$?W@tO-Q9Tx@vDUS6;=nqm9dpjg_xwNNrXeKVFAxYh@VKhMOvGbLB+KlHFXh
z8<&66SW#hiW9`+Z+8JEg!&iPu;L=<9Uely88NX_f>a%$A%GGpptohrlgVavprI$$U
zrNDANP&<g1_LACOaXBw89rUiV%cEjU9H5=B)fVH2<<CQ4)5tRX+mdyRJ!H+Zoy1x^
m(Cpn?MYy*f2<Y&@a64j>!rEmPluE_Z+mCb3uWxL>$^8p;<uKy_

literal 4584
zcmcJSYj7LY702)H%HGx532S4?n-GXB)*jnsz>0t-JUpsvHoJ;}m2g1Nhizg<l4FQ>
z<De^t&KMA5IYJ&s1Z<~#NNE^9`k|Src_r;cZ9PfKV<u$=8irxU?eKvQv_PRWBqZ*C
z*KraG9|}V?I{UbL_uPBV{hf2q8h77L$omz<`7u2CeTWoE$Tjrt3cJMxsf73-*FwGz
zX@T4c>40p93__lOj6=3`_4V1((cLxB)v;xJfBNEG-}bKVi+4McgFRceclUJI7xA8f
zjvWKrt*&^F)w8u@Ao)N~M_<p@0jp<Vr}f}PWtZOD@9pU6>g!l)+yNEI2Oiit&~<-b
zkMoAza$e~}U41)yItHw+2X}1kv8<l%k_vWo(q+$Pbbo1gPk-O`bm`bT*RZ2AKOqm1
zUE~e2in*DIGFzBC*&KVA{VQ9-Rl341p=5K3x3sqO&!vt0Yy2wV55fr{>9*WC_owcN
z_+#<uvNy{j<=e{tQtn-G(~7^ZxNhZrD<50Af93Sb?(`#rQjm{|KM0D1FcZ5s%uNXC
z;kARQg?4zA5rh0XvOyq(-AAi#<#Fz&LEcB$xnVrGc(sn?Dn2qL!SoF7WP*H9FeG-W
znHLBva!fNX5%%U1AK@Y;ydk;VhE%$Z%~gaJP3a|KdJYoPJ56$)A{ne$==SQ~nC>-n
zucmuL>3B#iXK37@i5N|4v^P|o5Q<ZLahQ+tqhy1v*^I9b@}Z#MGbACn7~wO7Cf4VN
zeWNK~E@fMi+ciYyckzLAupATGoeHD^hiW*Du+3s84(=Fq^ffi}24REz5HTe2gL4u7
zRvK?gr-p20LAS^-IB%AH4NJzAzk(iDq!LX*p21W1d7D@M=3JEj83V@*5obcuv*`B;
z?Q5dPvV<NVB7t;ZcV=pI!k5V&n-Z${Mf^Z0IWo?}tPvU&enF3oq3Z+*q|A7IU>N4<
zUaEWhb#FrVcIK)VU{bfKSIc^}q*n_%-@VI>%em@P|J_a)v)7kCl-`w2bec&i=w1x+
zVhK&i4-Hq*tej8zXi_SU^EBy$SM|CHqSuW<vXCK?TeUDaxzN4GoRH0NiN@>Aae>Cy
zo8#$y=>*J!wlW_SmMuHK@`bYzo?oQBO>|15(=j>|qO(R|S75mKqEMW12PQGfXx3MJ
z*`1%XM~n(@pG(8Y^@db6h<9H-2baUmv%>($@%aX!im(x31%@D8IwgugVZ9BPH;n#2
zU+zx@tctS=0v0%(brvN^WUNcBV(1G~qMYgJ=S**c%Xx-4hqC2auO`sc;0n6&W>5r9
zH5_=T;Q+%10k>6v-uVqR^Pt5{#vF9oWp05pdyRD5#{>nDWm555n8?OMub<qX=OP<_
zxAR9I@=SE&V{@mr9_E(T%#3SaS~l1Nw0ku=CDCb_&Iok&61X4XKz%B8SG9|BdR>gs
z>kNnn2{B*Cz@Qm7zLtUXtPMa2m@X|pVu?y-B1EZRCNv8F8y9e0^U0YAzXl_R00-hH
zkfI4Kkfli@FiCr3dgBl=XC{a_Jx0u_>_yD4I*j+;V`)5=rZyTj7x2A<>NhicH<T=y
z{Rj&pA!Cp%WQhG1W)HE;W+x0fEMYEmSTN%uim-y&OIgEr_H=}=V_~2JZivnpbT$@v
zIFJiW24;&d^Tk($B6ZL`#=|jA^7)K0lRmSr-rUw?_R8rggu6qs{~(mT4NhYqd5#LI
zyjGR3QspaEd5tPxp~|aOd6g>5s$8wgURCy}a+NAyuF93FT%pR6Dqp6`D^+=gDwnHr
znJSB_>{eyy;Z?a*l}l9FrOMo9Rc52A%sfrZgro{Su@s9INaZPFS_@2$IsxXaTYjIt
zl1MISGvm{2fASz}CicUlCm|;w$00>FD7X+@hNOJ>d4!h%wa>Y&TfnWG(drghpI#?(
zi+Y`89|ZeAxJ9kb$H_V9GZP9xZL*u|xy4+ZaPSA0ix-igZ{cXdW{%V9S~(Z6L;C{S
z+c?AzCo)m~b5<0X&DFjUPQ>~cdfiJ%LI)w!#EkDJcKV5*zzHTC$4NHhMFQOfBNLiz
zGLt8fx9cA)r3s^5UVB@8`dbBExdY$kYS(Hz%?C+i{lY0b-88;)D$28^m<~k%_Cpeo
z&TkWlofiTzX;5U;35iZ(-3EPuteqQoi+0#e<VSo+%xHkG`V(UekTv63<a5L99b(e)
zAx3mD5q>K|dWqp{Hk^v^_apg!6Nc9<em%q2e(>)I?^*nRGdx28@rO7t?=4aMI&m-t
zrS_JOb;n8Tj~|4uPeM*WjzfxobBWcaA7Y|?*an<UVciY5`{_BLicT_XlWHFX`#@Nm
zR8{~l3`hm>O?GoVYm;i15G-hdn{Q#4NM%7PYm<uh1+=%ZAeFUAy^gzdJ9bb+kzj*~
z9p1)t3}9mWOmsS2PJqkn-{O|ah8v&!EW-cxY$_33-muOu-}*s3_~luejL~rz4l*=4
z5u=kK6VZ$m9dhabgb0#K_inl^RD4N5bkjbq_@}SiSYv0Q1SCcIJI<!3QVHnP5Pybr
zQw0XP=`7saAMZq>N0!pC-=ad`u!Hg`G}gZd@0<ebA3@%SED)b#Cc$#ROiaQyQ)1vb
zCYM-Pia(h#q+9-hEP}qsBF!`ZitulILAxbuV}+$jmR@OvFVCk0B+vY$l%JJR5dd*Z
z^H_S774}-2s`FU^8(@A`${&`Chs5HvkViyN*3uYDx5BKoiTi37OE0m)E^AY%)dt0O
zosmB*<zJJFua=!JG)6c}UuA`5Ytw40?TWAVvBFnco7PxuS6Lg^=4S<roPSNqr{v->
zyCk#oecEkpbX#G;+9X<SWwsG$zc9*+ycJ$yZ7R3gR$AI+7d2$%;&I`;5yO0d2KK?;
z;?=g*8GF|~GQ~S3k+Yo?&MZfAX#1xTe&Wn>B>yjERPUK|%GrN3LuzTl>-V0qS2n55
zjbq>V?l&KVErM1>IcS)P*n`7sF?jd5A+^qZ65-!HZR35u1Xwmo-G?WCJpCQ<4d+FL
zou@IpgZQ@7UqW2F7GvFGKLz4;O>zNo6ax<8EW#)>F&sRNfEuN~sL*gaO_6HMgkebG
z5C4h%53f7#*`+0eNJBZ}L@ly+khD34<w;ZWnI75nN_2-z2PB#nXg_^|J}T441-e_x
z9bG$rv~2Eqe*T`4qlZi8?i1#x#lfu$_)MDXB=eo6bK_-m+scnTTsHr3$<Y@|<}`FJ
znSZ`?KF80kW9Oa~@R2q5P-!Nfi5-gz#U4KM>M>dtEwgB~FyGG~c}AFj#(i|yJ%1a2
z<VIosM)Bydc;q(w+vsRojEbAgH5h7n1@4ex)+7nUgLaJ^xR<UD%+f1_z~SaH`y)t1
z(cA1s(c8@9!-z^g>XE5e%2m|kGfzN0XGq3dAH%P~61-l+`Rs#iADogIZFgw9%pmVV
zXG4nq3SF<!JU&#;KhQjDG>@!#sx*(JdCE19pn1?uv30gT`f-$Zku>rlJgbOq<VA9K
z{oKd)!@GZt6Vgg->SOX;MsYVPRZ*?>Isf&U)@(MrS4kF>kujyUphTxz1b0P2yS-hD
zes5mW{Ih#Wp)wLKD77<6?MQn8S0h<1TxeIG%4mi5Xh8{Ql{d$XyKpqZSNr|7H7!Do
zOR8*nRt<$hg+lw22b4W6t*y~QI9iz2YV|P2iH2KSl~(@^x<0E%3tEle-=Nnv=<NqE
zj(d+XgHiS<wKWArzwmF>!nz(VV7gk1Vvmu@N`8N3jsI_n`ihE?R=>ZZ61sW|dzFU9
zhWTvvfb!G<C9)?RZP4|~k=E-Al3tiqbablpUsPMjNR_pfBiXFdDpu5FB!3GA$Yiru
z+wUk9jjgkKR?(y3y9x&sU5ON=5$Mbc&Um)Q^E0fyaP{Sj_Cle6`Ra`gdgYhxi59B-
zGneT0j}$VRGO~N5E{y*B_SWgPug`0X>dwUhrAFK=)?}21$1_sPkfMxXYR3y<1QGcU
DwkV1b

diff --git a/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/PiggyBank.polkavm
index d0082db90e5e398832e4a32a9ec86dce83d16dd5..78455fcdd7c64a3a1f5e93b6d62cd03b46eb5953 100644
GIT binary patch
literal 5088
zcmcIneQX<N8Na*Ox%bXVdr95gUFx>3<z{X{lbI**#u%n#Z||xNY}0{(p*F44bK0!#
zl9XI>fRfi9wp*A|&mq_Bk5#2plN60j2-<E4l`rj_{=pytOo~J#J~Te0ZG5f!Xv1^o
zq@if5!~~I^dvou-@AE$I^ZPv?j(_q2idtMpIe#=ueLqHBqGGO*g=Txk1=0%Q2e}>O
zPLL0S+z;{)NH54BNC~6@vcIdh*Ooo$u7R#S`?E)eR^NKFUFp@g{+{7(`>nfA>mJzC
zKalO~(z^S)59}G}dANH|Z})+LzV3meeTP>y{rkJUyT7}ucTat?A5`=_{P59%u6@1T
z&JKHKS?TAydXIMR8R+Xe+<&0Eudh4pZehj_yX?cqjy9&dkMw4T8q2>3Fk@f(9QAq1
zpq``}m^+zW%tOow^H=whdp~=c?MIJrW$p*uM*boGL!P~!Yo1RvZEgN+^HlSLYhGRR
z)wN#XcH!f~QQ?b1dg%DD5Mh)2M<P5$(cd_Bmt~?{VRL?ParXycf{(CwP<J7UVxHE$
zA7Wul>k-yZF;)RStp0ZgHQw@yAs}OY{wN(`BPcE~XYXYZ#qbUEy{tenA9VYvhJ>4q
z3ob4$H2#4eZ}Be~LX9%kzepKv7B#+pk;=C$rrRQHEQ0)T0UcOMuuszw1X{K<r0hrB
zekc^<1xuX25hD}6m)%P-<KiONY-rK+BdRE=qM(XMWpjgk6HN|A$ytF+Nn{$4nRye<
zpJnF@Y?8%vPK)s_qA8=9IO&OH2PcO8*`itW&rcyE6Ez3@S#zS~pPyh2Ed<j>#;>;N
zYTFUDEu*&WrR#^@bYgng(4=wkDj0LqJVobbaz-w*H^MDNSl&%C(n!HeiqgcOpY+&A
zs2wJyc8q}(K}INgTwctdTuh%ZOv#uKh!!#?5YeK>L~b&d8KG=N9zW>vs2vwz$1@;P
zAg8FeF)KHm%bfbREfE$J07yJ@J;ClGS~xd2LV@=pD8lhHeHZdlOae6n6jTE^kq1oc
z8=8K4Ey2nWmSm!^l3T`l&GXlitcx1Wgd;2+;Ubg(p8()%05AgpGXO9H05dlM-1;c!
zdd(f-0N@A$j;x|=WKwt8Gpj&5C>uR-2mjqt!?)ZyZEpne3_YrS;WInG+5FoVe?TR6
ze)jTHUwx?|xwH6}lRtdiy}}CRCM=d&p}ajSKJ6x2YIN`uqATuJJ9I|vI0BLZ*}KfL
zo0Qprr)~u}0}fe+JVgo;IGGd#LyM6DGBkx0V0-8*jftJlzzu4y<jx1a*na!3T<^y6
zCxOb2<-xDHa+-~oi)V;t+Q1TR42BrdiZCbyzQVcxNt|~gwEq_OtFy{xwB{!|giV;}
zMkX{e2zT`7v`ZB4qKRfvBy))x?Rk;P4w`n5O&s^nKgR?4vnS1yHvJ*IAP(kCM0!GV
z6Lx@-On7bzIg$F~2DRfVaQhXImp~TZj^j2G;aga`=<pvb1$)9|Kq5nASRj3NOwA2=
zY6DGVf2?-59qJ3`n;<F7^-;Axup(vjgli>kfn(?HM)McYLZK<y6n2qJta=Ppb-t$C
z(NN7Jawt^205hObsrIqe36}Jv$f0oc2~O>hXthHCK_C#@8y54=FQ%V0E=b06f^i-h
z7jw_$GL*xWdQ|5$Kh3*FwJ;6W0_VaIf+PTgpKpYBfDtYd0#NPy3~O;1qK2l!5HqwR
zFerwWfgxpRdza(XyY7ty`zTXS&~1(IqBBaz2$*z%UV;>2Kc?)*DSAoixJ0AzOpG=%
zKY|4>+S3-jG_IWj6KeE0C^fWSgZ{XoEz(NI@8I<%m>z-gFOXR;(L7L$M|6_?vrWt-
z`};<<BMNa81_^=q-%ULEnb5Ouy_@J5wO!%FwGP)w!TE6z1tiA26Y*~q*fC}`u1Eee
zqE~8|boy$7^+E}Xld}q$N|EUpnTcnOY+?R9nxE#fC!rqM1?M6+0byXGB>DrD8-)4?
zF&h`!_rvy|Q{ZEAC~DcPgd#;UO122h%AVA#683qpkH<a^`&jI2#6CCnxv;MR`xxw_
zag#Y2)n_-GlVN>!lQ|jEXE&OYetp(wPD=Xh26Ix-XWPw_NUzGeC77kCUKPz!Sg*F3
zrI21-Z<hRe)oYd{y}HgU33|2FEF#@%(O*EO9@VY2rXJR<HKrcYt!Ai~MsG@gp$W`S
zpN~+X(COD4pFb$NTI-zim0wG;e`FzqO>q1eND*Y@ors{^BAiE7{SR3b@+xFh$T^T*
zfp+!ex<ZriO&NPR{fEg}M5f5JL}mnXNU;nL*_W~iQzOVa?wK>1Y6iPtv7|R@%_6cl
zW^r6?inpdYOLx`stTogCK+IiWYsVpFrD|pFiq6%3#Lk6Lt$|-C@N@UuN$konm*5VA
zLn6@mY^wY-!8R_DOk9;DRT5MQspw;h@koSqIW3RB9~S)ndc<yTT4-1|T<|NBPmv@=
z@+y*`NKJ}_6bV)bZH0l4!uoGthxWy(yFsYB5X#`uXF#SvPTlaVu|I0`OGY|o3<$;{
z#W0YuFJ<&Oje+CIde(tNQSZKjFa0CIe&+gr!`$~?UuJGNE_6)8_6^r<=I#qyw*hmb
zHgjV(bCv8#axj&h(Pt&|TvWfj**q84FK;r>h4jlC&2xVJvd=sx>6bT{=LG$7yE%jO
zSy`_N4wprT%WV#q*E?MHI$U1oaJkjta*J*uhs$dnF0XO8-0X0<$>Flc;WF=VnbTiz
zsLVQ4Zgi;ZHg&&lxlCQstp-yUbc-<ukwb3iNB!yB;zG#!dxE|D+Gr*f;p{Gq<59>B
z*$&e3iVL?t1xv(hjtgUCN+HuJJNWyux$G0!8DP(NVRNm3pj8|n`fzQCg`OH3KI<%N
zUF3{IF}v(sL{`BwcaAd-Mr-HT1(UV<n`-yCtR3jF>SM<iO8op+U}@;F=dVn9lAe@n
zF2vPB{FO=9+&w&`mn#)$b9cCm4A?Fh`z2!^m|Dm8y$FmOev4?x#znAk3E8-W!Lmbf
z<GGY^F>YK?05Z}m&0E0nC^_f{oy!TUx}30#^RXnl<E>H6RtuFXF5Ld~-x91JG{>QW
z!=0RbrRx$FMJ%>qaXl8jSX_t2RxGw)QNZF_EUv*~GZve$=)oe7MGlJyi!2r!vFOI4
z3yTd{WUxqM)&_QHvkmOfCL7qHjW)1DJ{#Df4K}bt?KZHw3}}!HbQKbq?By1hmX_?_
z4CG*}Hp#({g35HI1ZX9=fj6O6Lh?$o2jM?xh5e%$sGU(G6N?Ms#lI%lB+SEYqTh3F
zGn}CqsMQcPJPX|?#ekEZE^K~1wEfK$@7^&3yL@l1L{E%N$H`0z@_sQpk)4JF1-c<S
z%wI(Fx}E<aBInPu&<;86&TroU>~j;2L!k=X`8iPblQ-%nSOpHto3XqJ%Nw!m!}10!
zw_{nxvV>(3%WYU*k7X~G*I~I8%Pm+Iu)G$_Yp~pm<t8k9u*_o_lprj#SZ>6!8_O;%
zH=Lrd%nV_f{vkAL3ON*;Kko6Ow{EyCTswv!ZYxt@Y~_N{6T=)zpbvmu(cGY&zo0>h
z3-5pC)dU-;8yf=jvAEff#Z6e;h(#Y3H(;?Hi!v7fH+1l#U$59_H)(bzu9%QvDkf$V
z=!w@I+kz{$MZI9#mthjHT(%9U_y4o)ySK1y>8fK}r(;_xF5IoZl3@P?lVDq_?zeYg
zS{lNr8{()J0;vZgsS85s0K}3%;_1^3JD%XH8)jVn5K@OBrn+<kf@*gJrNN%KaL=nR
zC)oX0M>8>}qNMGgc=h6S?=Rttdi9-)SL{2mAavC!ULOf{zP?hGcn)emwIlSKm+g|Z
zV*v&_E_D9vW$LMXklP;gCY7yE1@6pu7K_D79&O1}t>wy@$>7#f<xC~GRV{ZW%e#{0
zsWatZsW?^%PAl63fo<x2$%GR4`B?nxW6AfVlkF`n+qjk;h@<oQvB{B<k>J+p4pj|2
zS}dLkj-Lr8-ZvgpcQMSeW^drWzzL-@U;lOj)Mq*?<w{U32Lpj}d3U+odEdcg*?w2c
ziZiyfsOt9eG1yk>ghz0D=c7eo2Wr{U*$I30wU<^-I&tG@1UppqbYMKVO)Uo1ty|U8
z!On8U(Yvg>egc@5++B1|SXmE7Jbu#}Z$Kr%&M;WCt+R|fZO0rt)3K!-9IpgRWkF5u
z+U8ihq7gPPUtmjlrh_{XoZhvpZ+E#BoLUZUxv{03SIRdIbC#V^86{Z?!lH8UG!EG2
zKL{&Yw}GP7V|L_&ba30&6R-;O-I#~D^=fNCU7b-|W96j1Y<VgGUM?0DFne-GP#w#o
K@@{p@7V2MxZf6nz

literal 5088
zcmcIneQX=$8Na*Ox%bXVdr95gUFx>3<p#Gy$i`?G#)sF1w|5n6wpTWf54CBXp3{b-
zm!$Q3$(RBQvz<^YHDOJ|A5>jYlT>1$xoZoP;(^yD=^ta0*cd`uBnDy|5+D1Ze6-nf
z=cJ(sDh+Lso_llez3=lr@ALaTAC7(RE{d94LD_#aOZ_@Zm8qzs|FwE+#sSg@;sv=5
z<OY!2LGA>(52O<$1Cj?B0omEs*=fnPR9jEmww({|HJ9ExA8bo4y>)l=wp(xQU0Qq3
zw(gz>yV|t&uJ&EqdO99x-`3f_tEa2IXJ41Tr0KKo*6!~1w$5$U$!<{5@xTN7dfIk$
zw%a?bnFXc$+dB8PZ|mu5)4O-IcXhR=oDIyd?ywFY-dCGy-`n}1SzG*rj~V{a{nUP{
zk9vx#VQyfyF!wS2%m>bS=T7!0+l>sa$i2v|=I`Tgb!~TDbbY^Wef_=l<Mnqhdw<!1
z<!<3R;XA@U;UOVqKHMvW*#!U1Fi%nR=-^GIQDkP<_Gad`-Ut(Xn7y952~iX?M%=fu
zFs4YD^-@eJ3m=yLyPg_qc+U`!v9fm`9cIHQCNRfuV-dyhHS}$)Kr!ENda0VYlZ^=u
zE+*8zMGrN2=MCXi%2@d_Wi(AwLo4T~-iEnUQ<#m0kvArwUGs7F89IzW%etDR^@v#y
zg<{-biT&4aq=UDy+bL#9oCBK;Es`=+QBp-g6_LuC8NQAtyCdYdK*l99fyiWe6qS#&
z<t&?EF>Pv5-a#}ala7&&Xee`_*Bi<ea^CVdGSZPk#v2+vkoT4kv4$3aX(R1bn~2)9
zS8YnGP21_}p;w%k>NPZJNW1{XTrp44=A_wYrniT=`7p~nNm|;Ub(5TQAmb$+))8vU
zD5bUxgXBQ^DSAkr>peV|I%tea#vy@d0pk!NTEsYHj+yCx%2MR=f-aZZavF9#3o;IJ
zg!%%r%w98n<TG2sEGPhwc;-@^-9ogWndzs%dtnsjc$&TmxhW=&>H!L>2As$PrqvB~
zzq=S`<uFUq5m?DBV7=^xiwV|2WzxYgONY5ICBP>D_#yzz0Kg0Y%mBd56#zFr3c5aY
zhB*K@jDRCcC>!bIP1ej3&^F3ON6f~5t5ov~r>SiZBc7o%+7Iv9{PX%ZUVf2^Z@%~Z
z(+A$JNo>x&bNHplor|ncuE1h}70MU0;s;KmB{P}tijJ65Z6S=>vKJ%`vVDPNS17Xp
zPhJgh1{|^gd6Hx$a5BjXh887RWM~S>!uG&pweii*!VO+^BsSmmQ1f+va(orbp8_h|
zmivF|Ff|J=2hR{~)B=`h!!SgNmV-ed@D<MeJaN8^(B7-uugWTm(VCYK2%8`wMmn%R
z19$YATAAV<G|{Fhk}gx3j+d!WreFoxfgx}Cc^=3gI$Stx(I3JK;$UhNk&eLBAuB*h
zIyg0s>`47hjoNYnxcwf;+aPma#BmFW;8iT0v-uB}f;~afBN3DI3Z%=5si_`UrKgVU
zj8<;8LjCNiI!FprT~wtDtVkLi!AhPx%dt~8qw;BVHd~jd3pz+TdU6n*Bz%Qf(QtAA
zkv)Nvr(p&(Dkr<xlZRN+ktBP9C!gfh7Kv6{1P}xQvAt@p_l3FCbH-`OcwR70A>)ks
zyqTtKu2iGi)VwtB$Y?<tt_9A8Apl7L1}|R=?*JoQBmkgV_t~q(V2BtRfgx&Wdtp!v
zEe%7`(6%qcse8r8arRNBnxLC%;e{|tOFx)&nx2OgVm-#K#}Rs7X(`iaC>^DZbOjc?
zY)wzo^F!JZFyU2t2$UMy>!3eoXmhmE@)vl08>ahVd<QaXndX6FJfaiq-z{P$*f(p{
zmI%aA5F`NN{VMU~WdhHA`c<OiVapYETobrX63&l-C?HYh%ZPuqz>YFYalQZZ5xrQ$
zq@x$&tQ$&Dj2u_Uc#=#+$z&|l7s{4Tq4ES5It=y5DmZ7j0}uwID1qLh%nZ~&h}oFX
zyc4#+LV=ITo=C}JB@`)=Rzf*oR_L%kBVmsVdwA^Ou!qH-TI_LRj{|#Zu!q4O8rK!Z
zBKqvw!dOtBT~inf=(DQ}V_tpMQy7!<*;R!xL7#0d97g(#tWOJtd_<oS3;Ccv(^SX@
z^qG}~yjP!b7xI!mv!aj}^qIy&4(Zbk`Z-h}5q)}jfduvGWd#z@r|Y3!8l6e~TpgI7
zIu)h@fukSVKEGRZG*&t1DSnt>|H?uLkAmZeL2@AdUq%GwD&gF}<bTMbkXIq2Le7Eg
z3bd;x*F~BHugKU7=|4!0BQj1VBr++GJxZz1MRp`hk1&--P-)0D)mK;PW6w^rq%%^Q
zMPz%l#Br5zzBIv=h@&#VmdqLeV(K(o8G@9RtQ5I(gsW88sUWJ<@Mp99)SXrmJJ-v_
zIUO7lhR$cr$iL%k?L0}xR7p}LL6wk-zOD2bVb)={Jl<|t@aCnk)!sDFux@YGt4JP2
zk`&3UNP;5ODH2j7SnanI`o0D0PhNud#jd-4sJal!;L&G6#zBr;_N=ipVsuMJDr)oy
z#vaA!L&lDz(PcLVwkNAu2NFfK`|`j2uQ+?prT>Px*Irs+ZZIaaOu+V4mn`P)2$rq^
z=0+^$MlI$lp~GZ%GBl~rN`(^<{ruX(iJ*RdP2og9Kfk(g!mFS66i!I``BjAzf_}ca
zFp2b8S)UPXE{ison`|zxw7KlIxxB*Wa-+@V27MaYTwZQ-d6~`SdYj92HkVyCmwB7Z
zoPN%xGHX-0)~2$vK)m|2qd+8mx~4z`eVQp`kWFsrN4=?QVnSg0pK<o)i<xvX%voI+
z$D@D~vK^%5MHg;-8kUF`Z5Kw#xI!k9R`7R)ejIu-Gzsh(%C4<s5wwa!=Is@eg`OH3
zzS42FbcQqbM6I%O29>g|sS}*BJ5o8po*iXN-F1~)9HmWYU}j+OY@RPa21^42FPs~5
zC0t3zRDi1l_;X{9satqRFXu*}&E4cM(qOw_?39chU}_cLYY`aN`zg_og^OU}60mRy
zf@OPR#`8(zOw2f~0A!?7Do+E)BV@N1bS@;UnT3R9oQfvU^`B-mOD$BYm~h=Q|A@0*
z&>Vvb4tKKil};oqidbyI;z}&KvA6<@jaY2JqJYKaSX_q1dMwsq(S=1GiyRga7FjIT
zV$q332Nr9v$Y7DitOcyO)&kaCV*zWfwtzJ~7O>_j3s|$+0+z^t21!F#A%V$mZf<^l
z-ulfzc1J5?9Q-I4oEXdlT5+!D6R4Gtyb|ng_zzlPZzc`3Gh(EpF(EkjL7Yv%JlrNS
zY2RjVnqr_<L)7prbe|LhP8!Ru{V1^UlSS`dKMA`$pDadCluX3PWD@dzE_5g~0SO9p
zLv|>iL1kj)e~8HPDHhrxyWM%?W5C`#WIGh9zzz3<vfqAOHKCN{u)G$_Yp}c;%N{JR
z!g4c~Wh_fr7O~uf<&{`=V|fLZ8?oGgWdX~}vAhh+^;oXMvJ1;RmO%-^GK=L}EIYC6
zz;ewI3d@X%W%?y())cZQS{`z_(WjT)7OV^+h}*$&Fm~{?(GkTQilZCBu85hj@)tBH
zG2!daz8`0ORbvByJ{H${vA70{tFh?8;wmgQV^PN9|Ar23^!lQGR+DCB;=xhKFoUC*
zjiV<&vTgG(+7@wxZ9joYz;eMhpx*z_wqISvw)qRTZLPL#$(V36c`wfX4JN_1WYuq9
z!L$^FQ9Hy@Cj?RlL{b}s(k_T4Z`jqP_gV1-U!5@H=!THeA*R~&J_xF<VUz-UV!|!&
zzZ++FUdW`Qc11~9Kk=%?>$X3_7xluI7q93qVL{-6UA(>-X#HrhD)AiDeri+T&+l3#
zYtw5m&@rL)_wQ0q5BRx_es@Ay|FrLhf!16uH!^_M4N#56kz-^2_4$!wBmVVju{BZL
zk|>TJEBf=f;Sv9YveD<;px%*)E528TV?P^Cd@Yq|ZfMxRHEcp0Juoml*5BXnUq8{J
zs=h~axnuq(j``!EC;aLbhFQ?;_TAw-sI(4Lza0ei>DH0rh+i%GeZFFGYq8jR$L>VY
zdRL2zJvKC`>c-+AY|FR8!@sfh(VVaeHLPoGg*`i(^NS}PynHl*9jbcN_k@3gn)9pc
z*Q-bUt;G>r?}F~?31C`cYtBAlaXlFE_!Vp1KJ^iQYY;5j&|1W;mSYBwwX7@ppBVAy
zi-MZivca}?Q6p?#xWKyNWD9rDKe1&&-_~LyIJM|scX>;3Kq+26%wBd(Wt2qT4~vTa
zqu6Jee>bdX+yIJ}j@dNer~Mn&AB0t)@A5p%tyCL*>e7tb7%e8OWeZb2@NzDvfZ1c4
P{Oa%kDsENRt)u<}4o5!@

diff --git a/substrate/frame/revive/uapi/Cargo.toml b/substrate/frame/revive/uapi/Cargo.toml
index 7241d667fcd..cf006941cfd 100644
--- a/substrate/frame/revive/uapi/Cargo.toml
+++ b/substrate/frame/revive/uapi/Cargo.toml
@@ -22,7 +22,7 @@ paste = { workspace = true }
 scale-info = { features = ["derive"], optional = true, workspace = true }
 
 [target.'cfg(target_arch = "riscv64")'.dependencies]
-polkavm-derive = { version = "0.18.0" }
+polkavm-derive = { version = "0.19.0" }
 
 [package.metadata.docs.rs]
 features = ["unstable-hostfn"]
-- 
GitLab


From c2531dc12dedfb345c16200229038ef8d04972cc Mon Sep 17 00:00:00 2001
From: Yuri Volkov <0@mcornholio.ru>
Date: Fri, 17 Jan 2025 18:00:04 +0100
Subject: [PATCH 075/116] review-bot upgrade (#7214)

Upgrading PAPI in review-bot:
https://github.com/paritytech/review-bot/issues/140
---
 .github/workflows/review-bot.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml
index 3dd5b111481..27c6162a0fc 100644
--- a/.github/workflows/review-bot.yml
+++ b/.github/workflows/review-bot.yml
@@ -29,7 +29,7 @@ jobs:
         with:
           artifact-name: pr_number
       - name: "Evaluates PR reviews and assigns reviewers"
-        uses: paritytech/review-bot@v2.6.0
+        uses: paritytech/review-bot@v2.7.0
         with:
           repo-token: ${{ steps.app_token.outputs.token }}
           team-token: ${{ steps.app_token.outputs.token }}
-- 
GitLab


From 0047c4cb15d3361454c4042f08bc69f28bdada8f Mon Sep 17 00:00:00 2001
From: Maksym H <1177472+mordamax@users.noreply.github.com>
Date: Fri, 17 Jan 2025 17:48:11 +0000
Subject: [PATCH 076/116] enable-deprecation-warning for old command bot
 (#7221)

Deprecation warning for old command bot
---
 .github/workflows/command-inform.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/command-inform.yml b/.github/workflows/command-inform.yml
index 97346395319..3431eadf706 100644
--- a/.github/workflows/command-inform.yml
+++ b/.github/workflows/command-inform.yml
@@ -8,7 +8,7 @@ jobs:
   comment:
     runs-on: ubuntu-latest
     # Temporary disable the bot until the new command bot works properly
-    if: github.event.issue.pull_request && startsWith(github.event.comment.body, 'bot ') && false # disabled for now, until tested
+    if: github.event.issue.pull_request && startsWith(github.event.comment.body, 'bot ')
     steps:
       - name: Inform that the new command exist
         uses: actions/github-script@v7
@@ -18,5 +18,5 @@ jobs:
               issue_number: context.issue.number,
               owner: context.repo.owner,
               repo: context.repo.repo,
-              body: 'We have migrated the command bot to GHA<br/><br/>Please, see the new usage instructions <a href="https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/commands-readme.md">here</a>. Soon the old commands will be disabled.'
-            })
\ No newline at end of file
+              body: 'We have migrated the command bot to GHA<br/><br/>Please, see the new usage instructions <a href="https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/commands-readme.md">here</a> or <a href="https://forum.parity.io/t/streamlining-weight-generation-and-more-the-new-cmd-bot/2411">here</a>. Soon the old commands will be disabled.'
+            })
-- 
GitLab


From f90a785c1689f7a64bcb161490b4393dd0b65d65 Mon Sep 17 00:00:00 2001
From: Santi Balaguer <santiago.balaguer@gmail.com>
Date: Fri, 17 Jan 2025 18:50:03 +0100
Subject: [PATCH 077/116] added new proxy ParaRegistration to Westend (#6995)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This adds a new Proxy type to Westend Runtime called ParaRegistration.
This is related to:
https://github.com/polkadot-fellows/runtimes/pull/520.

This new proxy allows:
1. Reserve paraID
2. Register Parachain
3. Leverage Utilites pallet
4. Remove proxy.

---------

Co-authored-by: command-bot <>
Co-authored-by: Dónal Murray <donal.murray@parity.io>
---
 polkadot/runtime/westend/src/lib.rs | 10 ++++++++++
 prdoc/pr_6995.prdoc                 | 14 ++++++++++++++
 2 files changed, 24 insertions(+)
 create mode 100644 prdoc/pr_6995.prdoc

diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 8a5771fe7cc..a9ba0778fe0 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -1087,6 +1087,7 @@ pub enum ProxyType {
 	CancelProxy,
 	Auction,
 	NominationPools,
+	ParaRegistration,
 }
 impl Default for ProxyType {
 	fn default() -> Self {
@@ -1183,6 +1184,15 @@ impl InstanceFilter<RuntimeCall> for ProxyType {
 					RuntimeCall::Registrar(..) |
 					RuntimeCall::Slots(..)
 			),
+			ProxyType::ParaRegistration => matches!(
+				c,
+				RuntimeCall::Registrar(paras_registrar::Call::reserve { .. }) |
+					RuntimeCall::Registrar(paras_registrar::Call::register { .. }) |
+					RuntimeCall::Utility(pallet_utility::Call::batch { .. }) |
+					RuntimeCall::Utility(pallet_utility::Call::batch_all { .. }) |
+					RuntimeCall::Utility(pallet_utility::Call::force_batch { .. }) |
+					RuntimeCall::Proxy(pallet_proxy::Call::remove_proxy { .. })
+			),
 		}
 	}
 	fn is_superset(&self, o: &Self) -> bool {
diff --git a/prdoc/pr_6995.prdoc b/prdoc/pr_6995.prdoc
new file mode 100644
index 00000000000..ffdb4738a6f
--- /dev/null
+++ b/prdoc/pr_6995.prdoc
@@ -0,0 +1,14 @@
+title: added new proxy ParaRegistration to Westend
+doc:
+- audience: Runtime User
+  description: |-
+    This adds a new Proxy type to Westend Runtime called ParaRegistration. This is related to: https://github.com/polkadot-fellows/runtimes/pull/520.
+
+    This new proxy allows:
+    1. Reserve paraID
+    2. Register Parachain
+    3. Leverage Utilites pallet
+    4. Remove proxy.
+crates:
+- name: westend-runtime
+  bump: major
-- 
GitLab


From 7702fdd1bd869e518bf176ccf0268f83f8927f9b Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Fri, 17 Jan 2025 19:21:38 +0100
Subject: [PATCH 078/116] [pallet-revive] Add  tracing support (1/3) (#7166)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add foundation for supporting call traces in pallet_revive

Follow up:
- PR #7167 Add changes to eth-rpc to introduce debug endpoint that will
use pallet-revive tracing features
- PR #6727 Add new RPC to the client and implement tracing runtime API
that can capture traces on previous blocks

---------

Co-authored-by: Alexander Theißen <alex.theissen@me.com>
---
 Cargo.lock                                    |   1 +
 .../assets/asset-hub-westend/src/lib.rs       |   1 -
 substrate/bin/node/runtime/src/lib.rs         |   1 -
 substrate/frame/revive/Cargo.toml             |   2 +
 .../frame/revive/fixtures/build/_Cargo.toml   |   1 +
 .../revive/fixtures/contracts/tracing.rs      |  75 ++++++
 .../fixtures/contracts/tracing_callee.rs      |  45 ++++
 substrate/frame/revive/rpc/src/client.rs      |  46 +---
 .../frame/revive/src/benchmarking/mod.rs      |   2 +-
 substrate/frame/revive/src/debug.rs           | 109 --------
 substrate/frame/revive/src/evm.rs             |  45 ++++
 substrate/frame/revive/src/evm/api.rs         |   5 +
 substrate/frame/revive/src/evm/api/byte.rs    |  74 +-----
 .../revive/src/evm/api/debug_rpc_types.rs     | 219 ++++++++++++++++
 .../frame/revive/src/evm/api/hex_serde.rs     |  84 +++++++
 substrate/frame/revive/src/evm/runtime.rs     |  14 +-
 substrate/frame/revive/src/evm/tracing.rs     | 134 ++++++++++
 substrate/frame/revive/src/exec.rs            |  76 ++++--
 substrate/frame/revive/src/lib.rs             |  12 +-
 substrate/frame/revive/src/tests.rs           | 158 +++++++++++-
 .../frame/revive/src/tests/test_debug.rs      | 235 ------------------
 substrate/frame/revive/src/tracing.rs         |  64 +++++
 22 files changed, 912 insertions(+), 491 deletions(-)
 create mode 100644 substrate/frame/revive/fixtures/contracts/tracing.rs
 create mode 100644 substrate/frame/revive/fixtures/contracts/tracing_callee.rs
 delete mode 100644 substrate/frame/revive/src/debug.rs
 create mode 100644 substrate/frame/revive/src/evm/api/debug_rpc_types.rs
 create mode 100644 substrate/frame/revive/src/evm/api/hex_serde.rs
 create mode 100644 substrate/frame/revive/src/evm/tracing.rs
 delete mode 100644 substrate/frame/revive/src/tests/test_debug.rs
 create mode 100644 substrate/frame/revive/src/tracing.rs

diff --git a/Cargo.lock b/Cargo.lock
index 23271617e92..da4e8551191 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14839,6 +14839,7 @@ dependencies = [
  "assert_matches",
  "derive_more 0.99.17",
  "environmental",
+ "ethabi-decode 2.0.0",
  "ethereum-types 0.15.1",
  "frame-benchmarking 28.0.0",
  "frame-support 28.0.0",
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 3ef5e87f24c..41f29fe2c56 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -1077,7 +1077,6 @@ impl pallet_revive::Config for Runtime {
 	type InstantiateOrigin = EnsureSigned<Self::AccountId>;
 	type RuntimeHoldReason = RuntimeHoldReason;
 	type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent;
-	type Debug = ();
 	type Xcm = pallet_xcm::Pallet<Self>;
 	type ChainId = ConstU64<420_420_421>;
 	type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12.
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 117d306e306..26f4dacf9a1 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -1491,7 +1491,6 @@ impl pallet_revive::Config for Runtime {
 	type InstantiateOrigin = EnsureSigned<Self::AccountId>;
 	type RuntimeHoldReason = RuntimeHoldReason;
 	type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent;
-	type Debug = ();
 	type Xcm = ();
 	type ChainId = ConstU64<420_420_420>;
 	type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12.
diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml
index 49a27cfdaab..0959cc50638 100644
--- a/substrate/frame/revive/Cargo.toml
+++ b/substrate/frame/revive/Cargo.toml
@@ -20,6 +20,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 codec = { features = ["derive", "max-encoded-len"], workspace = true }
 derive_more = { workspace = true }
 environmental = { workspace = true }
+ethabi = { workspace = true }
 ethereum-types = { workspace = true, features = ["codec", "rlp", "serialize"] }
 hex = { workspace = true }
 impl-trait-for-tuples = { workspace = true }
@@ -75,6 +76,7 @@ default = ["std"]
 std = [
 	"codec/std",
 	"environmental/std",
+	"ethabi/std",
 	"ethereum-types/std",
 	"frame-benchmarking?/std",
 	"frame-support/std",
diff --git a/substrate/frame/revive/fixtures/build/_Cargo.toml b/substrate/frame/revive/fixtures/build/_Cargo.toml
index 483d9775b12..1a0a635420a 100644
--- a/substrate/frame/revive/fixtures/build/_Cargo.toml
+++ b/substrate/frame/revive/fixtures/build/_Cargo.toml
@@ -14,6 +14,7 @@ edition = "2021"
 [dependencies]
 uapi = { package = 'pallet-revive-uapi', path = "", features = ["unstable-hostfn"], default-features = false }
 common = { package = 'pallet-revive-fixtures-common', path = "" }
+hex-literal = { version = "0.4.1", default-features = false }
 polkavm-derive = { version = "0.19.0" }
 
 [profile.release]
diff --git a/substrate/frame/revive/fixtures/contracts/tracing.rs b/substrate/frame/revive/fixtures/contracts/tracing.rs
new file mode 100644
index 00000000000..9cbef3bbc84
--- /dev/null
+++ b/substrate/frame/revive/fixtures/contracts/tracing.rs
@@ -0,0 +1,75 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! This fixture calls itself as many times as passed as argument.
+
+#![no_std]
+#![no_main]
+
+use common::input;
+use uapi::{HostFn, HostFnImpl as api};
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn deploy() {}
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn call() {
+	input!(calls_left: u32, callee_addr: &[u8; 20],);
+	if calls_left == 0 {
+		return
+	}
+
+	let next_input = (calls_left - 1).to_le_bytes();
+	api::deposit_event(&[], b"before");
+
+	// Call the callee, ignore revert.
+	let _ = api::call(
+		uapi::CallFlags::empty(),
+		callee_addr,
+		u64::MAX,       // How much ref_time to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
+		&[0u8; 32],     // Value transferred to the contract.
+		&next_input,
+		None,
+	);
+
+	api::deposit_event(&[], b"after");
+
+	// own address
+	let mut addr = [0u8; 20];
+	api::address(&mut addr);
+	let mut input = [0u8; 24];
+
+	input[..4].copy_from_slice(&next_input);
+	input[4..24].copy_from_slice(&callee_addr[..20]);
+
+	// recurse
+	api::call(
+		uapi::CallFlags::ALLOW_REENTRY,
+		&addr,
+		u64::MAX,       // How much ref_time to devote for the execution. u64::MAX = use all.
+		u64::MAX,       // How much proof_size to devote for the execution. u64::MAX = use all.
+		&[u8::MAX; 32], // No deposit limit.
+		&[0u8; 32],     // Value transferred to the contract.
+		&input,
+		None,
+	)
+	.unwrap();
+}
diff --git a/substrate/frame/revive/fixtures/contracts/tracing_callee.rs b/substrate/frame/revive/fixtures/contracts/tracing_callee.rs
new file mode 100644
index 00000000000..d44771e417f
--- /dev/null
+++ b/substrate/frame/revive/fixtures/contracts/tracing_callee.rs
@@ -0,0 +1,45 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#![no_std]
+#![no_main]
+
+use common::input;
+use uapi::{HostFn, HostFnImpl as api};
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn deploy() {}
+
+#[no_mangle]
+#[polkavm_derive::polkavm_export]
+pub extern "C" fn call() {
+	input!(id: u32, );
+
+	match id {
+		// Revert with message "This function always fails"
+		2 => {
+			let data = hex_literal::hex!(
+		       "08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000"
+			);
+			api::return_value(uapi::ReturnFlags::REVERT, &data)
+		},
+		1 => {
+			panic!("booum");
+		},
+		_ => api::return_value(uapi::ReturnFlags::empty(), &id.to_le_bytes()),
+	};
+}
diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs
index cd0effe7faf..c61c5871f76 100644
--- a/substrate/frame/revive/rpc/src/client.rs
+++ b/substrate/frame/revive/rpc/src/client.rs
@@ -27,8 +27,9 @@ use crate::{
 use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned};
 use pallet_revive::{
 	evm::{
-		Block, BlockNumberOrTag, BlockNumberOrTagOrHash, GenericTransaction, ReceiptInfo,
-		SyncingProgress, SyncingStatus, TransactionSigned, H160, H256, U256,
+		extract_revert_message, Block, BlockNumberOrTag, BlockNumberOrTagOrHash,
+		GenericTransaction, ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160,
+		H256, U256,
 	},
 	EthTransactError, EthTransactInfo,
 };
@@ -83,47 +84,6 @@ fn unwrap_call_err(err: &subxt::error::RpcError) -> Option<ErrorObjectOwned> {
 	}
 }
 
-/// Extract the revert message from a revert("msg") solidity statement.
-fn extract_revert_message(exec_data: &[u8]) -> Option<String> {
-	let error_selector = exec_data.get(0..4)?;
-
-	match error_selector {
-		// assert(false)
-		[0x4E, 0x48, 0x7B, 0x71] => {
-			let panic_code: u32 = U256::from_big_endian(exec_data.get(4..36)?).try_into().ok()?;
-
-			// See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require
-			let msg = match panic_code {
-				0x00 => "generic panic",
-				0x01 => "assert(false)",
-				0x11 => "arithmetic underflow or overflow",
-				0x12 => "division or modulo by zero",
-				0x21 => "enum overflow",
-				0x22 => "invalid encoded storage byte array accessed",
-				0x31 => "out-of-bounds array access; popping on an empty array",
-				0x32 => "out-of-bounds access of an array or bytesN",
-				0x41 => "out of memory",
-				0x51 => "uninitialized function",
-				code => return Some(format!("execution reverted: unknown panic code: {code:#x}")),
-			};
-
-			Some(format!("execution reverted: {msg}"))
-		},
-		// revert(string)
-		[0x08, 0xC3, 0x79, 0xA0] => {
-			let decoded = ethabi::decode(&[ethabi::ParamType::String], &exec_data[4..]).ok()?;
-			if let Some(ethabi::Token::String(msg)) = decoded.first() {
-				return Some(format!("execution reverted: {msg}"))
-			}
-			Some("execution reverted".to_string())
-		},
-		_ => {
-			log::debug!(target: LOG_TARGET, "Unknown revert function selector: {error_selector:?}");
-			Some("execution reverted".to_string())
-		},
-	}
-}
-
 /// The error type for the client.
 #[derive(Error, Debug)]
 pub enum ClientError {
diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs
index 18d7bb0afc3..16bdd6d1a18 100644
--- a/substrate/frame/revive/src/benchmarking/mod.rs
+++ b/substrate/frame/revive/src/benchmarking/mod.rs
@@ -772,7 +772,7 @@ mod benchmarks {
 		let mut setup = CallSetup::<T>::default();
 		let input = setup.data();
 		let (mut ext, _) = setup.ext();
-		ext.override_export(crate::debug::ExportedFunction::Constructor);
+		ext.override_export(crate::exec::ExportedFunction::Constructor);
 
 		let mut runtime = crate::wasm::Runtime::<_, [u8]>::new(&mut ext, input);
 
diff --git a/substrate/frame/revive/src/debug.rs b/substrate/frame/revive/src/debug.rs
deleted file mode 100644
index d1fc0823e03..00000000000
--- a/substrate/frame/revive/src/debug.rs
+++ /dev/null
@@ -1,109 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-pub use crate::{
-	exec::{ExecResult, ExportedFunction},
-	primitives::ExecReturnValue,
-};
-use crate::{Config, LOG_TARGET};
-use sp_core::H160;
-
-/// Umbrella trait for all interfaces that serves for debugging.
-pub trait Debugger<T: Config>: Tracing<T> + CallInterceptor<T> {}
-
-impl<T: Config, V> Debugger<T> for V where V: Tracing<T> + CallInterceptor<T> {}
-
-/// Defines methods to capture contract calls, enabling external observers to
-/// measure, trace, and react to contract interactions.
-pub trait Tracing<T: Config> {
-	/// The type of [`CallSpan`] that is created by this trait.
-	type CallSpan: CallSpan;
-
-	/// Creates a new call span to encompass the upcoming contract execution.
-	///
-	/// This method should be invoked just before the execution of a contract and
-	/// marks the beginning of a traceable span of execution.
-	///
-	/// # Arguments
-	///
-	/// * `contract_address` - The address of the contract that is about to be executed.
-	/// * `entry_point` - Describes whether the call is the constructor or a regular call.
-	/// * `input_data` - The raw input data of the call.
-	fn new_call_span(
-		contract_address: &H160,
-		entry_point: ExportedFunction,
-		input_data: &[u8],
-	) -> Self::CallSpan;
-}
-
-/// Defines a span of execution for a contract call.
-pub trait CallSpan {
-	/// Called just after the execution of a contract.
-	///
-	/// # Arguments
-	///
-	/// * `output` - The raw output of the call.
-	fn after_call(self, output: &ExecReturnValue);
-}
-
-impl<T: Config> Tracing<T> for () {
-	type CallSpan = ();
-
-	fn new_call_span(contract_address: &H160, entry_point: ExportedFunction, input_data: &[u8]) {
-		log::trace!(target: LOG_TARGET, "call {entry_point:?} address: {contract_address:?}, input_data: {input_data:?}")
-	}
-}
-
-impl CallSpan for () {
-	fn after_call(self, output: &ExecReturnValue) {
-		log::trace!(target: LOG_TARGET, "call result {output:?}")
-	}
-}
-
-/// Provides an interface for intercepting contract calls.
-pub trait CallInterceptor<T: Config> {
-	/// Allows to intercept contract calls and decide whether they should be executed or not.
-	/// If the call is intercepted, the mocked result of the call is returned.
-	///
-	/// # Arguments
-	///
-	/// * `contract_address` - The address of the contract that is about to be executed.
-	/// * `entry_point` - Describes whether the call is the constructor or a regular call.
-	/// * `input_data` - The raw input data of the call.
-	///
-	/// # Expected behavior
-	///
-	/// This method should return:
-	/// * `Some(ExecResult)` - if the call should be intercepted and the mocked result of the call
-	/// is returned.
-	/// * `None` - otherwise, i.e. the call should be executed normally.
-	fn intercept_call(
-		contract_address: &H160,
-		entry_point: ExportedFunction,
-		input_data: &[u8],
-	) -> Option<ExecResult>;
-}
-
-impl<T: Config> CallInterceptor<T> for () {
-	fn intercept_call(
-		_contract_address: &H160,
-		_entry_point: ExportedFunction,
-		_input_data: &[u8],
-	) -> Option<ExecResult> {
-		None
-	}
-}
diff --git a/substrate/frame/revive/src/evm.rs b/substrate/frame/revive/src/evm.rs
index c8c967fbe09..33660a36aa6 100644
--- a/substrate/frame/revive/src/evm.rs
+++ b/substrate/frame/revive/src/evm.rs
@@ -19,6 +19,51 @@
 
 mod api;
 pub use api::*;
+mod tracing;
+pub use tracing::*;
 mod gas_encoder;
 pub use gas_encoder::*;
 pub mod runtime;
+
+use crate::alloc::{format, string::*};
+
+/// Extract the revert message from a revert("msg") solidity statement.
+pub fn extract_revert_message(exec_data: &[u8]) -> Option<String> {
+	let error_selector = exec_data.get(0..4)?;
+
+	match error_selector {
+		// assert(false)
+		[0x4E, 0x48, 0x7B, 0x71] => {
+			let panic_code: u32 = U256::from_big_endian(exec_data.get(4..36)?).try_into().ok()?;
+
+			// See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require
+			let msg = match panic_code {
+				0x00 => "generic panic",
+				0x01 => "assert(false)",
+				0x11 => "arithmetic underflow or overflow",
+				0x12 => "division or modulo by zero",
+				0x21 => "enum overflow",
+				0x22 => "invalid encoded storage byte array accessed",
+				0x31 => "out-of-bounds array access; popping on an empty array",
+				0x32 => "out-of-bounds access of an array or bytesN",
+				0x41 => "out of memory",
+				0x51 => "uninitialized function",
+				code => return Some(format!("execution reverted: unknown panic code: {code:#x}")),
+			};
+
+			Some(format!("execution reverted: {msg}"))
+		},
+		// revert(string)
+		[0x08, 0xC3, 0x79, 0xA0] => {
+			let decoded = ethabi::decode(&[ethabi::ParamKind::String], &exec_data[4..]).ok()?;
+			if let Some(ethabi::Token::String(msg)) = decoded.first() {
+				return Some(format!("execution reverted: {}", String::from_utf8_lossy(msg)))
+			}
+			Some("execution reverted".to_string())
+		},
+		_ => {
+			log::debug!(target: crate::LOG_TARGET, "Unknown revert function selector: {error_selector:?}");
+			Some("execution reverted".to_string())
+		},
+	}
+}
diff --git a/substrate/frame/revive/src/evm/api.rs b/substrate/frame/revive/src/evm/api.rs
index fe18c8735be..7a34fdc83f9 100644
--- a/substrate/frame/revive/src/evm/api.rs
+++ b/substrate/frame/revive/src/evm/api.rs
@@ -16,6 +16,8 @@
 // limitations under the License.
 //! JSON-RPC methods and types, for Ethereum.
 
+mod hex_serde;
+
 mod byte;
 pub use byte::*;
 
@@ -25,6 +27,9 @@ pub use rlp;
 mod type_id;
 pub use type_id::*;
 
+mod debug_rpc_types;
+pub use debug_rpc_types::*;
+
 mod rpc_types;
 mod rpc_types_gen;
 pub use rpc_types_gen::*;
diff --git a/substrate/frame/revive/src/evm/api/byte.rs b/substrate/frame/revive/src/evm/api/byte.rs
index c2d64f8e5e4..f11966d0072 100644
--- a/substrate/frame/revive/src/evm/api/byte.rs
+++ b/substrate/frame/revive/src/evm/api/byte.rs
@@ -15,79 +15,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //! Define Byte wrapper types for encoding and decoding hex strings
+use super::hex_serde::HexCodec;
 use alloc::{vec, vec::Vec};
 use codec::{Decode, Encode};
 use core::{
 	fmt::{Debug, Display, Formatter, Result as FmtResult},
 	str::FromStr,
 };
-use hex_serde::HexCodec;
 use scale_info::TypeInfo;
 use serde::{Deserialize, Serialize};
 
-mod hex_serde {
-	#[cfg(not(feature = "std"))]
-	use alloc::{format, string::String, vec::Vec};
-	use serde::{Deserialize, Deserializer, Serializer};
-
-	pub trait HexCodec: Sized {
-		type Error;
-		fn to_hex(&self) -> String;
-		fn from_hex(s: String) -> Result<Self, Self::Error>;
-	}
-
-	impl HexCodec for u8 {
-		type Error = core::num::ParseIntError;
-		fn to_hex(&self) -> String {
-			format!("0x{:x}", self)
-		}
-		fn from_hex(s: String) -> Result<Self, Self::Error> {
-			u8::from_str_radix(s.trim_start_matches("0x"), 16)
-		}
-	}
-
-	impl<const T: usize> HexCodec for [u8; T] {
-		type Error = hex::FromHexError;
-		fn to_hex(&self) -> String {
-			format!("0x{}", hex::encode(self))
-		}
-		fn from_hex(s: String) -> Result<Self, Self::Error> {
-			let data = hex::decode(s.trim_start_matches("0x"))?;
-			data.try_into().map_err(|_| hex::FromHexError::InvalidStringLength)
-		}
-	}
-
-	impl HexCodec for Vec<u8> {
-		type Error = hex::FromHexError;
-		fn to_hex(&self) -> String {
-			format!("0x{}", hex::encode(self))
-		}
-		fn from_hex(s: String) -> Result<Self, Self::Error> {
-			hex::decode(s.trim_start_matches("0x"))
-		}
-	}
-
-	pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
-	where
-		S: Serializer,
-		T: HexCodec,
-	{
-		let s = value.to_hex();
-		serializer.serialize_str(&s)
-	}
-
-	pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
-	where
-		D: Deserializer<'de>,
-		T: HexCodec,
-		<T as HexCodec>::Error: core::fmt::Debug,
-	{
-		let s = String::deserialize(deserializer)?;
-		let value = T::from_hex(s).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?;
-		Ok(value)
-	}
-}
-
 impl FromStr for Bytes {
 	type Err = hex::FromHexError;
 	fn from_str(s: &str) -> Result<Self, Self::Err> {
@@ -100,7 +37,7 @@ macro_rules! impl_hex {
     ($type:ident, $inner:ty, $default:expr) => {
         #[derive(Encode, Decode, Eq, PartialEq, TypeInfo, Clone, Serialize, Deserialize)]
         #[doc = concat!("`", stringify!($inner), "`", " wrapper type for encoding and decoding hex strings")]
-        pub struct $type(#[serde(with = "hex_serde")] pub $inner);
+        pub struct $type(#[serde(with = "crate::evm::api::hex_serde")] pub $inner);
 
         impl Default for $type {
             fn default() -> Self {
@@ -131,6 +68,13 @@ macro_rules! impl_hex {
     };
 }
 
+impl Bytes {
+	/// See `Vec::is_empty`
+	pub fn is_empty(&self) -> bool {
+		self.0.is_empty()
+	}
+}
+
 impl_hex!(Byte, u8, 0u8);
 impl_hex!(Bytes, Vec<u8>, vec![]);
 impl_hex!(Bytes8, [u8; 8], [0u8; 8]);
diff --git a/substrate/frame/revive/src/evm/api/debug_rpc_types.rs b/substrate/frame/revive/src/evm/api/debug_rpc_types.rs
new file mode 100644
index 00000000000..0857a59fbf3
--- /dev/null
+++ b/substrate/frame/revive/src/evm/api/debug_rpc_types.rs
@@ -0,0 +1,219 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::evm::{Bytes, CallTracer};
+use alloc::{fmt, string::String, vec::Vec};
+use codec::{Decode, Encode};
+use scale_info::TypeInfo;
+use serde::{
+	de::{self, MapAccess, Visitor},
+	Deserialize, Deserializer, Serialize,
+};
+use sp_core::{H160, H256, U256};
+
+/// Tracer configuration used to trace calls.
+#[derive(TypeInfo, Debug, Clone, Encode, Decode, Serialize, PartialEq)]
+#[serde(tag = "tracer", content = "tracerConfig")]
+pub enum TracerConfig {
+	/// A tracer that captures call traces.
+	#[serde(rename = "callTracer")]
+	CallTracer {
+		/// Whether or not to capture logs.
+		#[serde(rename = "withLog")]
+		with_logs: bool,
+	},
+}
+
+impl TracerConfig {
+	/// Build the tracer associated to this config.
+	pub fn build<G>(self, gas_mapper: G) -> CallTracer<U256, G> {
+		match self {
+			Self::CallTracer { with_logs } => CallTracer::new(with_logs, gas_mapper),
+		}
+	}
+}
+
+/// Custom deserializer to support the following JSON format:
+///
+/// ```json
+/// { "tracer": "callTracer", "tracerConfig": { "withLogs": false } }
+/// ```
+///
+/// ```json
+/// { "tracer": "callTracer" }
+/// ```
+impl<'de> Deserialize<'de> for TracerConfig {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: Deserializer<'de>,
+	{
+		struct TracerConfigVisitor;
+
+		impl<'de> Visitor<'de> for TracerConfigVisitor {
+			type Value = TracerConfig;
+
+			fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+				formatter.write_str("a map with tracer and optional tracerConfig")
+			}
+
+			fn visit_map<M>(self, mut map: M) -> Result<Self::Value, M::Error>
+			where
+				M: MapAccess<'de>,
+			{
+				let mut tracer_type: Option<String> = None;
+				let mut with_logs = None;
+
+				while let Some(key) = map.next_key::<String>()? {
+					match key.as_str() {
+						"tracer" => {
+							tracer_type = map.next_value()?;
+						},
+						"tracerConfig" => {
+							#[derive(Deserialize)]
+							struct CallTracerConfig {
+								#[serde(rename = "withLogs")]
+								with_logs: Option<bool>,
+							}
+							let inner: CallTracerConfig = map.next_value()?;
+							with_logs = inner.with_logs;
+						},
+						_ => {},
+					}
+				}
+
+				match tracer_type.as_deref() {
+					Some("callTracer") =>
+						Ok(TracerConfig::CallTracer { with_logs: with_logs.unwrap_or(true) }),
+					_ => Err(de::Error::custom("Unsupported or missing tracer type")),
+				}
+			}
+		}
+
+		deserializer.deserialize_map(TracerConfigVisitor)
+	}
+}
+
+#[test]
+fn test_tracer_config_serialization() {
+	let tracers = vec![
+		(r#"{"tracer": "callTracer"}"#, TracerConfig::CallTracer { with_logs: true }),
+		(
+			r#"{"tracer": "callTracer", "tracerConfig": { "withLogs": true }}"#,
+			TracerConfig::CallTracer { with_logs: true },
+		),
+		(
+			r#"{"tracer": "callTracer", "tracerConfig": { "withLogs": false }}"#,
+			TracerConfig::CallTracer { with_logs: false },
+		),
+	];
+
+	for (json_data, expected) in tracers {
+		let result: TracerConfig =
+			serde_json::from_str(json_data).expect("Deserialization should succeed");
+		assert_eq!(result, expected);
+	}
+}
+
+impl Default for TracerConfig {
+	fn default() -> Self {
+		TracerConfig::CallTracer { with_logs: false }
+	}
+}
+
+/// The type of call that was executed.
+#[derive(
+	Default, TypeInfo, Encode, Decode, Serialize, Deserialize, Eq, PartialEq, Clone, Debug,
+)]
+#[serde(rename_all = "UPPERCASE")]
+pub enum CallType {
+	/// A regular call.
+	#[default]
+	Call,
+	/// A read-only call.
+	StaticCall,
+	/// A delegate call.
+	DelegateCall,
+}
+
+/// A smart contract execution call trace.
+#[derive(
+	TypeInfo, Default, Encode, Decode, Serialize, Deserialize, Clone, Debug, Eq, PartialEq,
+)]
+pub struct CallTrace<Gas = U256> {
+	/// Address of the sender.
+	pub from: H160,
+	/// Address of the receiver.
+	pub to: H160,
+	/// Call input data.
+	pub input: Vec<u8>,
+	/// Amount of value transferred.
+	#[serde(skip_serializing_if = "U256::is_zero")]
+	pub value: U256,
+	/// Type of call.
+	#[serde(rename = "type")]
+	pub call_type: CallType,
+	/// Amount of gas provided for the call.
+	pub gas: Gas,
+	/// Amount of gas used.
+	#[serde(rename = "gasUsed")]
+	pub gas_used: Gas,
+	/// Return data.
+	#[serde(flatten, skip_serializing_if = "Bytes::is_empty")]
+	pub output: Bytes,
+	/// The error message if the call failed.
+	#[serde(skip_serializing_if = "Option::is_none")]
+	pub error: Option<String>,
+	/// The revert reason, if the call reverted.
+	#[serde(rename = "revertReason")]
+	pub revert_reason: Option<String>,
+	/// List of sub-calls.
+	#[serde(skip_serializing_if = "Vec::is_empty")]
+	pub calls: Vec<CallTrace<Gas>>,
+	/// List of logs emitted during the call.
+	#[serde(skip_serializing_if = "Vec::is_empty")]
+	pub logs: Vec<CallLog>,
+}
+
+/// A log emitted during a call.
+#[derive(
+	Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq,
+)]
+pub struct CallLog {
+	/// The address of the contract that emitted the log.
+	pub address: H160,
+	/// The log's data.
+	#[serde(skip_serializing_if = "Bytes::is_empty")]
+	pub data: Bytes,
+	/// The topics used to index the log.
+	#[serde(default, skip_serializing_if = "Vec::is_empty")]
+	pub topics: Vec<H256>,
+	/// Position of the log relative to subcalls within the same trace
+	/// See <https://github.com/ethereum/go-ethereum/pull/28389> for details
+	#[serde(with = "super::hex_serde")]
+	pub position: u32,
+}
+
+/// A transaction trace
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct TransactionTrace {
+	/// The transaction hash.
+	#[serde(rename = "txHash")]
+	pub tx_hash: H256,
+	/// The trace of the transaction.
+	#[serde(rename = "result")]
+	pub trace: CallTrace,
+}
diff --git a/substrate/frame/revive/src/evm/api/hex_serde.rs b/substrate/frame/revive/src/evm/api/hex_serde.rs
new file mode 100644
index 00000000000..ba07b36fa4b
--- /dev/null
+++ b/substrate/frame/revive/src/evm/api/hex_serde.rs
@@ -0,0 +1,84 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use alloc::{format, string::String, vec::Vec};
+use serde::{Deserialize, Deserializer, Serializer};
+
+pub trait HexCodec: Sized {
+	type Error;
+	fn to_hex(&self) -> String;
+	fn from_hex(s: String) -> Result<Self, Self::Error>;
+}
+
+macro_rules! impl_hex_codec {
+    ($($t:ty),*) => {
+        $(
+            impl HexCodec for $t {
+                type Error = core::num::ParseIntError;
+                fn to_hex(&self) -> String {
+                    format!("0x{:x}", self)
+                }
+                fn from_hex(s: String) -> Result<Self, Self::Error> {
+                    <$t>::from_str_radix(s.trim_start_matches("0x"), 16)
+                }
+            }
+        )*
+    };
+}
+
+impl_hex_codec!(u8, u32);
+
+impl<const T: usize> HexCodec for [u8; T] {
+	type Error = hex::FromHexError;
+	fn to_hex(&self) -> String {
+		format!("0x{}", hex::encode(self))
+	}
+	fn from_hex(s: String) -> Result<Self, Self::Error> {
+		let data = hex::decode(s.trim_start_matches("0x"))?;
+		data.try_into().map_err(|_| hex::FromHexError::InvalidStringLength)
+	}
+}
+
+impl HexCodec for Vec<u8> {
+	type Error = hex::FromHexError;
+	fn to_hex(&self) -> String {
+		format!("0x{}", hex::encode(self))
+	}
+	fn from_hex(s: String) -> Result<Self, Self::Error> {
+		hex::decode(s.trim_start_matches("0x"))
+	}
+}
+
+pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
+where
+	S: Serializer,
+	T: HexCodec,
+{
+	let s = value.to_hex();
+	serializer.serialize_str(&s)
+}
+
+pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
+where
+	D: Deserializer<'de>,
+	T: HexCodec,
+	<T as HexCodec>::Error: core::fmt::Debug,
+{
+	let s = String::deserialize(deserializer)?;
+	let value = T::from_hex(s).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))?;
+	Ok(value)
+}
diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs
index d4b344e20eb..0e5fc3da545 100644
--- a/substrate/frame/revive/src/evm/runtime.rs
+++ b/substrate/frame/revive/src/evm/runtime.rs
@@ -20,7 +20,7 @@ use crate::{
 		api::{GenericTransaction, TransactionSigned},
 		GasEncoder,
 	},
-	AccountIdOf, AddressMapper, BalanceOf, Config, MomentOf, LOG_TARGET,
+	AccountIdOf, AddressMapper, BalanceOf, Config, MomentOf, Weight, LOG_TARGET,
 };
 use alloc::vec::Vec;
 use codec::{Decode, Encode};
@@ -72,6 +72,18 @@ where
 	}
 }
 
+/// Convert a `Weight` into a gas value, using the fixed `GAS_PRICE`
+/// and the `Config::WeightPrice` to compute the fee.
+/// The gas is calculated as `fee / GAS_PRICE`, rounded up to the nearest integer.
+pub fn gas_from_weight<T: Config>(weight: Weight) -> U256
+where
+	BalanceOf<T>: Into<U256>,
+{
+	use sp_runtime::traits::Convert;
+	let fee: BalanceOf<T> = T::WeightPrice::convert(weight);
+	gas_from_fee(fee)
+}
+
 /// Wraps [`generic::UncheckedExtrinsic`] to support checking unsigned
 /// [`crate::Call::eth_transact`] extrinsic.
 #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)]
diff --git a/substrate/frame/revive/src/evm/tracing.rs b/substrate/frame/revive/src/evm/tracing.rs
new file mode 100644
index 00000000000..7466ec1de48
--- /dev/null
+++ b/substrate/frame/revive/src/evm/tracing.rs
@@ -0,0 +1,134 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use crate::{
+	evm::{extract_revert_message, CallLog, CallTrace, CallType},
+	primitives::ExecReturnValue,
+	tracing::Tracer,
+	DispatchError, Weight,
+};
+use alloc::{format, string::ToString, vec::Vec};
+use sp_core::{H160, H256, U256};
+
+/// A Tracer that reports logs and nested call traces of transactions.
+#[derive(Default, Debug, Clone, PartialEq, Eq)]
+pub struct CallTracer<Gas, GasMapper> {
+	/// Map Weight to Gas equivalent.
+	gas_mapper: GasMapper,
+	/// Store all in-progress CallTrace instances.
+	traces: Vec<CallTrace<Gas>>,
+	/// Stack of indices to the current active traces.
+	current_stack: Vec<usize>,
+	/// whether or not to capture logs.
+	with_log: bool,
+}
+
+impl<Gas, GasMapper> CallTracer<Gas, GasMapper> {
+	/// Create a new [`CallTracer`] instance.
+	pub fn new(with_log: bool, gas_mapper: GasMapper) -> Self {
+		Self { gas_mapper, traces: Vec::new(), current_stack: Vec::new(), with_log }
+	}
+
+	/// Collect the traces and return them.
+	pub fn collect_traces(&mut self) -> Vec<CallTrace<Gas>> {
+		core::mem::take(&mut self.traces)
+	}
+}
+
+impl<Gas: Default, GasMapper: Fn(Weight) -> Gas> Tracer for CallTracer<Gas, GasMapper> {
+	fn enter_child_span(
+		&mut self,
+		from: H160,
+		to: H160,
+		is_delegate_call: bool,
+		is_read_only: bool,
+		value: U256,
+		input: &[u8],
+		gas_left: Weight,
+	) {
+		let call_type = if is_read_only {
+			CallType::StaticCall
+		} else if is_delegate_call {
+			CallType::DelegateCall
+		} else {
+			CallType::Call
+		};
+
+		self.traces.push(CallTrace {
+			from,
+			to,
+			value,
+			call_type,
+			input: input.to_vec(),
+			gas: (self.gas_mapper)(gas_left),
+			..Default::default()
+		});
+
+		// Push the index onto the stack of the current active trace
+		self.current_stack.push(self.traces.len() - 1);
+	}
+
+	fn log_event(&mut self, address: H160, topics: &[H256], data: &[u8]) {
+		if !self.with_log {
+			return;
+		}
+
+		let current_index = self.current_stack.last().unwrap();
+		let position = self.traces[*current_index].calls.len() as u32;
+		let log =
+			CallLog { address, topics: topics.to_vec(), data: data.to_vec().into(), position };
+
+		let current_index = *self.current_stack.last().unwrap();
+		self.traces[current_index].logs.push(log);
+	}
+
+	fn exit_child_span(&mut self, output: &ExecReturnValue, gas_used: Weight) {
+		// Set the output of the current trace
+		let current_index = self.current_stack.pop().unwrap();
+		let trace = &mut self.traces[current_index];
+		trace.output = output.data.clone().into();
+		trace.gas_used = (self.gas_mapper)(gas_used);
+
+		if output.did_revert() {
+			trace.revert_reason = extract_revert_message(&output.data);
+			trace.error = Some("execution reverted".to_string());
+		}
+
+		//  Move the current trace into its parent
+		if let Some(parent_index) = self.current_stack.last() {
+			let child_trace = self.traces.remove(current_index);
+			self.traces[*parent_index].calls.push(child_trace);
+		}
+	}
+	fn exit_child_span_with_error(&mut self, error: DispatchError, gas_used: Weight) {
+		// Set the output of the current trace
+		let current_index = self.current_stack.pop().unwrap();
+		let trace = &mut self.traces[current_index];
+		trace.gas_used = (self.gas_mapper)(gas_used);
+
+		trace.error = match error {
+			DispatchError::Module(sp_runtime::ModuleError { message, .. }) =>
+				Some(message.unwrap_or_default().to_string()),
+			_ => Some(format!("{:?}", error)),
+		};
+
+		//  Move the current trace into its parent
+		if let Some(parent_index) = self.current_stack.last() {
+			let child_trace = self.traces.remove(current_index);
+			self.traces[*parent_index].calls.push(child_trace);
+		}
+	}
+}
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index f696f75a4a1..d2ef6c9c7ba 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -17,12 +17,12 @@
 
 use crate::{
 	address::{self, AddressMapper},
-	debug::{CallInterceptor, CallSpan, Tracing},
 	gas::GasMeter,
 	limits,
 	primitives::{ExecReturnValue, StorageDeposit},
 	runtime_decl_for_revive_api::{Decode, Encode, RuntimeDebugNoBound, TypeInfo},
 	storage::{self, meter::Diff, WriteOutcome},
+	tracing::if_tracing,
 	transient_storage::TransientStorage,
 	BalanceOf, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, Error, Event,
 	ImmutableData, ImmutableDataOf, Pallet as Contracts,
@@ -773,7 +773,25 @@ where
 		)? {
 			stack.run(executable, input_data).map(|_| stack.first_frame.last_frame_output)
 		} else {
-			Self::transfer_from_origin(&origin, &origin, &dest, value)
+			if_tracing(|t| {
+				let address =
+					origin.account_id().map(T::AddressMapper::to_address).unwrap_or_default();
+				let dest = T::AddressMapper::to_address(&dest);
+				t.enter_child_span(address, dest, false, false, value, &input_data, Weight::zero());
+			});
+
+			let result = Self::transfer_from_origin(&origin, &origin, &dest, value);
+			match result {
+				Ok(ref output) => {
+					if_tracing(|t| {
+						t.exit_child_span(&output, Weight::zero());
+					});
+				},
+				Err(e) => {
+					if_tracing(|t| t.exit_child_span_with_error(e.error.into(), Weight::zero()));
+				},
+			}
+			result
 		}
 	}
 
@@ -1018,6 +1036,7 @@ where
 	fn run(&mut self, executable: E, input_data: Vec<u8>) -> Result<(), ExecError> {
 		let frame = self.top_frame();
 		let entry_point = frame.entry_point;
+		let is_delegate_call = frame.delegate.is_some();
 		let delegated_code_hash =
 			if frame.delegate.is_some() { Some(*executable.code_hash()) } else { None };
 
@@ -1038,6 +1057,9 @@ where
 		let do_transaction = || -> ExecResult {
 			let caller = self.caller();
 			let frame = top_frame_mut!(self);
+			let read_only = frame.read_only;
+			let value_transferred = frame.value_transferred;
+			let account_id = &frame.account_id.clone();
 
 			// We need to charge the storage deposit before the initial transfer so that
 			// it can create the account in case the initial transfer is < ed.
@@ -1045,10 +1067,11 @@ where
 				// Root origin can't be used to instantiate a contract, so it is safe to assume that
 				// if we reached this point the origin has an associated account.
 				let origin = &self.origin.account_id()?;
+
 				frame.nested_storage.charge_instantiate(
 					origin,
-					&frame.account_id,
-					frame.contract_info.get(&frame.account_id),
+					&account_id,
+					frame.contract_info.get(&account_id),
 					executable.code_info(),
 					self.skip_transfer,
 				)?;
@@ -1069,15 +1092,34 @@ where
 				)?;
 			}
 
-			let contract_address = T::AddressMapper::to_address(&top_frame!(self).account_id);
-
-			let call_span = T::Debug::new_call_span(&contract_address, entry_point, &input_data);
+			let contract_address = T::AddressMapper::to_address(account_id);
+			let maybe_caller_address = caller.account_id().map(T::AddressMapper::to_address);
+
+			if_tracing(|tracer| {
+				tracer.enter_child_span(
+					maybe_caller_address.unwrap_or_default(),
+					contract_address,
+					is_delegate_call,
+					read_only,
+					value_transferred,
+					&input_data,
+					frame.nested_gas.gas_left(),
+				);
+			});
 
-			let output = T::Debug::intercept_call(&contract_address, entry_point, &input_data)
-				.unwrap_or_else(|| executable.execute(self, entry_point, input_data))
-				.map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?;
+			let output = executable.execute(self, entry_point, input_data).map_err(|e| {
+				if_tracing(|tracer| {
+					tracer.exit_child_span_with_error(
+						e.error,
+						top_frame_mut!(self).nested_gas.gas_consumed(),
+					);
+				});
+				ExecError { error: e.error, origin: ErrorOrigin::Callee }
+			})?;
 
-			call_span.after_call(&output);
+			if_tracing(|tracer| {
+				tracer.exit_child_span(&output, top_frame_mut!(self).nested_gas.gas_consumed());
+			});
 
 			// Avoid useless work that would be reverted anyways.
 			if output.did_revert() {
@@ -1353,7 +1395,7 @@ where
 		&mut self,
 		gas_limit: Weight,
 		deposit_limit: U256,
-		dest: &H160,
+		dest_addr: &H160,
 		value: U256,
 		input_data: Vec<u8>,
 		allows_reentry: bool,
@@ -1369,7 +1411,7 @@ where
 		*self.last_frame_output_mut() = Default::default();
 
 		let try_call = || {
-			let dest = T::AddressMapper::to_account_id(dest);
+			let dest = T::AddressMapper::to_account_id(dest_addr);
 			if !self.allows_reentry(&dest) {
 				return Err(<Error<T>>::ReentranceDenied.into());
 			}
@@ -1661,11 +1703,11 @@ where
 	}
 
 	fn deposit_event(&mut self, topics: Vec<H256>, data: Vec<u8>) {
-		Contracts::<Self::T>::deposit_event(Event::ContractEmitted {
-			contract: T::AddressMapper::to_address(self.account_id()),
-			data,
-			topics,
+		let contract = T::AddressMapper::to_address(self.account_id());
+		if_tracing(|tracer| {
+			tracer.log_event(contract, &topics, &data);
 		});
+		Contracts::<Self::T>::deposit_event(Event::ContractEmitted { contract, data, topics });
 	}
 
 	fn block_number(&self) -> U256 {
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index a9f2842c35f..c36cb3f47ca 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -35,9 +35,9 @@ mod wasm;
 mod tests;
 
 pub mod chain_extension;
-pub mod debug;
 pub mod evm;
 pub mod test_utils;
+pub mod tracing;
 pub mod weights;
 
 use crate::{
@@ -83,7 +83,6 @@ use sp_runtime::{
 
 pub use crate::{
 	address::{create1, create2, AccountId32Mapper, AddressMapper},
-	debug::Tracing,
 	exec::{MomentOf, Origin},
 	pallet::*,
 };
@@ -118,7 +117,6 @@ const LOG_TARGET: &str = "runtime::revive";
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
-	use crate::debug::Debugger;
 	use frame_support::pallet_prelude::*;
 	use frame_system::pallet_prelude::*;
 	use sp_core::U256;
@@ -255,12 +253,6 @@ pub mod pallet {
 		#[pallet::no_default_bounds]
 		type InstantiateOrigin: EnsureOrigin<Self::RuntimeOrigin, Success = Self::AccountId>;
 
-		/// Debugging utilities for contracts.
-		/// For production chains, it's recommended to use the `()` implementation of this
-		/// trait.
-		#[pallet::no_default_bounds]
-		type Debug: Debugger<Self>;
-
 		/// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and
 		/// execute XCM programs.
 		#[pallet::no_default_bounds]
@@ -367,7 +359,6 @@ pub mod pallet {
 			type InstantiateOrigin = EnsureSigned<AccountId>;
 			type WeightInfo = ();
 			type WeightPrice = Self;
-			type Debug = ();
 			type Xcm = ();
 			type RuntimeMemory = ConstU32<{ 128 * 1024 * 1024 }>;
 			type PVFMemory = ConstU32<{ 512 * 1024 * 1024 }>;
@@ -1146,7 +1137,6 @@ where
 			DepositLimit::Unchecked
 		};
 
-		// TODO remove once we have revisited how we encode the gas limit.
 		if tx.nonce.is_none() {
 			tx.nonce = Some(<System<T>>::account_nonce(&origin).into());
 		}
diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs
index 8398bc2cb66..90b9f053a03 100644
--- a/substrate/frame/revive/src/tests.rs
+++ b/substrate/frame/revive/src/tests.rs
@@ -16,12 +16,8 @@
 // limitations under the License.
 
 mod pallet_dummy;
-mod test_debug;
 
-use self::{
-	test_debug::TestDebug,
-	test_utils::{ensure_stored, expected_deposit},
-};
+use self::test_utils::{ensure_stored, expected_deposit};
 use crate::{
 	self as pallet_revive,
 	address::{create1, create2, AddressMapper},
@@ -29,13 +25,14 @@ use crate::{
 		ChainExtension, Environment, Ext, RegisteredChainExtension, Result as ExtensionResult,
 		RetVal, ReturnFlags,
 	},
-	evm::{runtime::GAS_PRICE, GenericTransaction},
+	evm::{runtime::GAS_PRICE, CallTrace, CallTracer, CallType, GenericTransaction},
 	exec::Key,
 	limits,
 	primitives::CodeUploadReturnValue,
 	storage::DeletionQueueManager,
 	test_utils::*,
 	tests::test_utils::{get_contract, get_contract_checked},
+	tracing::trace,
 	wasm::Memory,
 	weights::WeightInfo,
 	AccountId32Mapper, BalanceOf, Code, CodeInfoOf, Config, ContractInfo, ContractInfoOf,
@@ -523,7 +520,6 @@ impl Config for Test {
 	type UploadOrigin = EnsureAccount<Self, UploadAccount>;
 	type InstantiateOrigin = EnsureAccount<Self, InstantiateAccount>;
 	type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent;
-	type Debug = TestDebug;
 	type ChainId = ChainId;
 }
 
@@ -4554,3 +4550,151 @@ fn unstable_interface_rejected() {
 		assert_ok!(builder::bare_instantiate(Code::Upload(code)).build().result);
 	});
 }
+
+#[test]
+fn tracing_works_for_transfers() {
+	ExtBuilder::default().build().execute_with(|| {
+		let _ = <Test as Config>::Currency::set_balance(&ALICE, 100_000_000);
+		let mut tracer = CallTracer::new(false, |_| U256::zero());
+		trace(&mut tracer, || {
+			builder::bare_call(BOB_ADDR).value(10_000_000).build_and_unwrap_result();
+		});
+		assert_eq!(
+			tracer.collect_traces(),
+			vec![CallTrace {
+				from: ALICE_ADDR,
+				to: BOB_ADDR,
+				value: U256::from(10_000_000),
+				call_type: CallType::Call,
+				..Default::default()
+			},]
+		)
+	});
+}
+
+#[test]
+fn tracing_works() {
+	use crate::evm::*;
+	use CallType::*;
+	let (code, _code_hash) = compile_module("tracing").unwrap();
+	let (wasm_callee, _) = compile_module("tracing_callee").unwrap();
+	ExtBuilder::default().existential_deposit(200).build().execute_with(|| {
+		let _ = <Test as Config>::Currency::set_balance(&ALICE, 1_000_000);
+
+		let Contract { addr: addr_callee, .. } =
+			builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract();
+
+		let Contract { addr, .. } =
+			builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract();
+
+		let tracer_options = vec![
+			( false , vec![]),
+			(
+				true ,
+				vec![
+					CallLog {
+						address: addr,
+						topics: Default::default(),
+						data: b"before".to_vec().into(),
+						position: 0,
+					},
+					CallLog {
+						address: addr,
+						topics: Default::default(),
+						data: b"after".to_vec().into(),
+						position: 1,
+					},
+				],
+			),
+		];
+
+		// Verify that the first trace reports the same weight reported by bare_call
+		let mut tracer = CallTracer::new(false, |w| w);
+		let gas_used = trace(&mut tracer, || {
+			builder::bare_call(addr).data((3u32, addr_callee).encode()).build().gas_consumed
+		});
+		let traces = tracer.collect_traces();
+		assert_eq!(&traces[0].gas_used, &gas_used);
+
+		// Discarding gas usage, check that traces reported are correct
+		for (with_logs, logs) in tracer_options {
+			let mut tracer = CallTracer::new(with_logs, |_| U256::zero());
+			trace(&mut tracer, || {
+				builder::bare_call(addr).data((3u32, addr_callee).encode()).build()
+			});
+
+
+			assert_eq!(
+				tracer.collect_traces(),
+				vec![CallTrace {
+					from: ALICE_ADDR,
+					to: addr,
+					input: (3u32, addr_callee).encode(),
+					call_type: Call,
+					logs: logs.clone(),
+					calls: vec![
+						CallTrace {
+							from: addr,
+							to: addr_callee,
+							input: 2u32.encode(),
+							output: hex_literal::hex!(
+										"08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000"
+									).to_vec().into(),
+							revert_reason: Some(
+								"execution reverted: This function always fails".to_string()
+							),
+							error: Some("execution reverted".to_string()),
+							call_type: Call,
+							..Default::default()
+						},
+						CallTrace {
+							from: addr,
+							to: addr,
+							input: (2u32, addr_callee).encode(),
+							call_type: Call,
+							logs: logs.clone(),
+							calls: vec![
+								CallTrace {
+									from: addr,
+									to: addr_callee,
+									input: 1u32.encode(),
+									output: Default::default(),
+									error: Some("ContractTrapped".to_string()),
+									call_type: Call,
+									..Default::default()
+								},
+								CallTrace {
+									from: addr,
+									to: addr,
+									input: (1u32, addr_callee).encode(),
+									call_type: Call,
+									logs: logs.clone(),
+									calls: vec![
+										CallTrace {
+											from: addr,
+											to: addr_callee,
+											input: 0u32.encode(),
+											output: 0u32.to_le_bytes().to_vec().into(),
+											call_type: Call,
+											..Default::default()
+										},
+										CallTrace {
+											from: addr,
+											to: addr,
+											input: (0u32, addr_callee).encode(),
+											call_type: Call,
+											..Default::default()
+										},
+									],
+									..Default::default()
+								},
+							],
+							..Default::default()
+						},
+					],
+					..Default::default()
+				},]
+			);
+		}
+	});
+}
diff --git a/substrate/frame/revive/src/tests/test_debug.rs b/substrate/frame/revive/src/tests/test_debug.rs
deleted file mode 100644
index b1fdb2d4744..00000000000
--- a/substrate/frame/revive/src/tests/test_debug.rs
+++ /dev/null
@@ -1,235 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use super::*;
-
-use crate::{
-	debug::{CallInterceptor, CallSpan, ExecResult, ExportedFunction, Tracing},
-	primitives::ExecReturnValue,
-	test_utils::*,
-	DepositLimit,
-};
-use frame_support::traits::Currency;
-use pretty_assertions::assert_eq;
-use sp_core::H160;
-use std::cell::RefCell;
-
-#[derive(Clone, PartialEq, Eq, Debug)]
-struct DebugFrame {
-	contract_address: sp_core::H160,
-	call: ExportedFunction,
-	input: Vec<u8>,
-	result: Option<Vec<u8>>,
-}
-
-thread_local! {
-	static DEBUG_EXECUTION_TRACE: RefCell<Vec<DebugFrame>> = RefCell::new(Vec::new());
-	static INTERCEPTED_ADDRESS: RefCell<Option<sp_core::H160>> = RefCell::new(None);
-}
-
-pub struct TestDebug;
-pub struct TestCallSpan {
-	contract_address: sp_core::H160,
-	call: ExportedFunction,
-	input: Vec<u8>,
-}
-
-impl Tracing<Test> for TestDebug {
-	type CallSpan = TestCallSpan;
-
-	fn new_call_span(
-		contract_address: &crate::H160,
-		entry_point: ExportedFunction,
-		input_data: &[u8],
-	) -> TestCallSpan {
-		DEBUG_EXECUTION_TRACE.with(|d| {
-			d.borrow_mut().push(DebugFrame {
-				contract_address: *contract_address,
-				call: entry_point,
-				input: input_data.to_vec(),
-				result: None,
-			})
-		});
-		TestCallSpan {
-			contract_address: *contract_address,
-			call: entry_point,
-			input: input_data.to_vec(),
-		}
-	}
-}
-
-impl CallInterceptor<Test> for TestDebug {
-	fn intercept_call(
-		contract_address: &sp_core::H160,
-		_entry_point: ExportedFunction,
-		_input_data: &[u8],
-	) -> Option<ExecResult> {
-		INTERCEPTED_ADDRESS.with(|i| {
-			if i.borrow().as_ref() == Some(contract_address) {
-				Some(Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![] }))
-			} else {
-				None
-			}
-		})
-	}
-}
-
-impl CallSpan for TestCallSpan {
-	fn after_call(self, output: &ExecReturnValue) {
-		DEBUG_EXECUTION_TRACE.with(|d| {
-			d.borrow_mut().push(DebugFrame {
-				contract_address: self.contract_address,
-				call: self.call,
-				input: self.input,
-				result: Some(output.data.clone()),
-			})
-		});
-	}
-}
-
-#[test]
-fn debugging_works() {
-	let (wasm_caller, _) = compile_module("call").unwrap();
-	let (wasm_callee, _) = compile_module("store_call").unwrap();
-
-	fn current_stack() -> Vec<DebugFrame> {
-		DEBUG_EXECUTION_TRACE.with(|stack| stack.borrow().clone())
-	}
-
-	fn deploy(wasm: Vec<u8>) -> H160 {
-		Contracts::bare_instantiate(
-			RuntimeOrigin::signed(ALICE),
-			0,
-			GAS_LIMIT,
-			DepositLimit::Balance(deposit_limit::<Test>()),
-			Code::Upload(wasm),
-			vec![],
-			Some([0u8; 32]),
-		)
-		.result
-		.unwrap()
-		.addr
-	}
-
-	fn constructor_frame(contract_address: &H160, after: bool) -> DebugFrame {
-		DebugFrame {
-			contract_address: *contract_address,
-			call: ExportedFunction::Constructor,
-			input: vec![],
-			result: if after { Some(vec![]) } else { None },
-		}
-	}
-
-	fn call_frame(contract_address: &H160, args: Vec<u8>, after: bool) -> DebugFrame {
-		DebugFrame {
-			contract_address: *contract_address,
-			call: ExportedFunction::Call,
-			input: args,
-			result: if after { Some(vec![]) } else { None },
-		}
-	}
-
-	ExtBuilder::default().existential_deposit(200).build().execute_with(|| {
-		let _ = Balances::deposit_creating(&ALICE, 1_000_000);
-
-		assert_eq!(current_stack(), vec![]);
-
-		let addr_caller = deploy(wasm_caller);
-		let addr_callee = deploy(wasm_callee);
-
-		assert_eq!(
-			current_stack(),
-			vec![
-				constructor_frame(&addr_caller, false),
-				constructor_frame(&addr_caller, true),
-				constructor_frame(&addr_callee, false),
-				constructor_frame(&addr_callee, true),
-			]
-		);
-
-		let main_args = (100u32, &addr_callee.clone()).encode();
-		let inner_args = (100u32).encode();
-
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			addr_caller,
-			0,
-			GAS_LIMIT,
-			deposit_limit::<Test>(),
-			main_args.clone()
-		));
-
-		let stack_top = current_stack()[4..].to_vec();
-		assert_eq!(
-			stack_top,
-			vec![
-				call_frame(&addr_caller, main_args.clone(), false),
-				call_frame(&addr_callee, inner_args.clone(), false),
-				call_frame(&addr_callee, inner_args, true),
-				call_frame(&addr_caller, main_args, true),
-			]
-		);
-	});
-}
-
-#[test]
-fn call_interception_works() {
-	let (wasm, _) = compile_module("dummy").unwrap();
-
-	ExtBuilder::default().existential_deposit(200).build().execute_with(|| {
-		let _ = Balances::deposit_creating(&ALICE, 1_000_000);
-
-		let account_id = Contracts::bare_instantiate(
-			RuntimeOrigin::signed(ALICE),
-			0,
-			GAS_LIMIT,
-			deposit_limit::<Test>().into(),
-			Code::Upload(wasm),
-			vec![],
-			// some salt to ensure that the address of this contract is unique among all tests
-			Some([0x41; 32]),
-		)
-		.result
-		.unwrap()
-		.addr;
-
-		// no interception yet
-		assert_ok!(Contracts::call(
-			RuntimeOrigin::signed(ALICE),
-			account_id,
-			0,
-			GAS_LIMIT,
-			deposit_limit::<Test>(),
-			vec![],
-		));
-
-		// intercept calls to this contract
-		INTERCEPTED_ADDRESS.with(|i| *i.borrow_mut() = Some(account_id));
-
-		assert_err_ignore_postinfo!(
-			Contracts::call(
-				RuntimeOrigin::signed(ALICE),
-				account_id,
-				0,
-				GAS_LIMIT,
-				deposit_limit::<Test>(),
-				vec![],
-			),
-			<Error<Test>>::ContractReverted,
-		);
-	});
-}
diff --git a/substrate/frame/revive/src/tracing.rs b/substrate/frame/revive/src/tracing.rs
new file mode 100644
index 00000000000..e9c05f8cb50
--- /dev/null
+++ b/substrate/frame/revive/src/tracing.rs
@@ -0,0 +1,64 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::{primitives::ExecReturnValue, DispatchError, Weight};
+use environmental::environmental;
+use sp_core::{H160, H256, U256};
+
+environmental!(tracer: dyn Tracer + 'static);
+
+/// Trace the execution of the given closure.
+///
+/// # Warning
+///
+/// Only meant to be called from off-chain code as its additional resource usage is
+/// not accounted for in the weights or memory envelope.
+pub fn trace<R, F: FnOnce() -> R>(tracer: &mut (dyn Tracer + 'static), f: F) -> R {
+	tracer::using_once(tracer, f)
+}
+
+/// Run the closure when tracing is enabled.
+///
+/// This is safe to be called from on-chain code as tracing will never be activated
+/// there. Hence the closure is not executed in this case.
+pub(crate) fn if_tracing<F: FnOnce(&mut (dyn Tracer + 'static))>(f: F) {
+	tracer::with(f);
+}
+
+/// Defines methods to trace contract interactions.
+pub trait Tracer {
+	/// Called before a contract call is executed
+	fn enter_child_span(
+		&mut self,
+		from: H160,
+		to: H160,
+		is_delegate_call: bool,
+		is_read_only: bool,
+		value: U256,
+		input: &[u8],
+		gas: Weight,
+	);
+
+	/// Record a log event
+	fn log_event(&mut self, event: H160, topics: &[H256], data: &[u8]);
+
+	/// Called after a contract call is executed
+	fn exit_child_span(&mut self, output: &ExecReturnValue, gas_left: Weight);
+
+	/// Called when a contract call terminates with an error
+	fn exit_child_span_with_error(&mut self, error: DispatchError, gas_left: Weight);
+}
-- 
GitLab


From 06f5d486f552e2ead543024168035bcdbb29c027 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert <skunert49@gmail.com>
Date: Mon, 20 Jan 2025 09:25:43 +0100
Subject: [PATCH 079/116] Collator: Fix `can_build_upon` by always allowing to
 build on included block (#7205)

Follow-up to #6825, which introduced this bug.

We use the `can_build_upon` method to ask the runtime if it is fine to
build another block. The runtime checks this based on the
[`ConsensusHook`](https://github.com/paritytech/polkadot-sdk/blob/c1b7c3025aa4423d4cf3e57309b60fb7602c2db6/cumulus/pallets/aura-ext/src/consensus_hook.rs#L110-L110)
implementation, the most popular one being the `FixedConsensusHook`.

In #6825 I removed a check that would always allow us to build when we
are building on an included block. Turns out this check is still
required when:
1. The [`UnincludedSegment`
](https://github.com/paritytech/polkadot-sdk/blob/c1b7c3025aa4423d4cf3e57309b60fb7602c2db6/cumulus/pallets/parachain-system/src/lib.rs#L758-L758)
storage item in pallet-parachain-system is equal to or larger than the allowed
unincluded segment length.
2. We are calling the `can_build_upon` runtime API where the included
block has progressed offchain to the current parent block (so last entry
in the `UnincludedSegment` storage item).

In this scenario the last entry in `UnincludedSegment` does not have a
hash assigned yet (because it was not available in `on_finalize` of the
previous block). So the unincluded segment will be reported at its
maximum length which will forbid building another block.

Ideally we would have a more elegant solution than to rely on the
node-side here. But for now the check is reintroduced and a test is
added to not break it again by accident.

---------

Co-authored-by: command-bot <>
Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
---
 Cargo.lock                                    |   3 +
 cumulus/client/consensus/aura/Cargo.toml      |   5 +
 .../consensus/aura/src/collators/mod.rs       | 132 +++++++++++++++++-
 prdoc/pr_7205.prdoc                           |  10 ++
 4 files changed, 144 insertions(+), 6 deletions(-)
 create mode 100644 prdoc/pr_7205.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index da4e8551191..c9a139f3074 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4640,6 +4640,8 @@ dependencies = [
  "cumulus-primitives-aura 0.7.0",
  "cumulus-primitives-core 0.7.0",
  "cumulus-relay-chain-interface",
+ "cumulus-test-client",
+ "cumulus-test-relay-sproof-builder 0.7.0",
  "futures",
  "parity-scale-codec",
  "parking_lot 0.12.3",
@@ -4664,6 +4666,7 @@ dependencies = [
  "sp-consensus-aura 0.32.0",
  "sp-core 28.0.0",
  "sp-inherents 26.0.0",
+ "sp-keyring 31.0.0",
  "sp-keystore 0.34.0",
  "sp-runtime 31.0.1",
  "sp-state-machine 0.35.0",
diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml
index 70223093864..8637133a5f5 100644
--- a/cumulus/client/consensus/aura/Cargo.toml
+++ b/cumulus/client/consensus/aura/Cargo.toml
@@ -59,6 +59,11 @@ polkadot-node-subsystem-util = { workspace = true, default-features = true }
 polkadot-overseer = { workspace = true, default-features = true }
 polkadot-primitives = { workspace = true, default-features = true }
 
+[dev-dependencies]
+cumulus-test-client = { workspace = true }
+cumulus-test-relay-sproof-builder = { workspace = true }
+sp-keyring = { workspace = true }
+
 [features]
 # Allows collator to use full PoV size for block building
 full-pov-size = []
diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs
index 031fa963ba6..66c6086eaf9 100644
--- a/cumulus/client/consensus/aura/src/collators/mod.rs
+++ b/cumulus/client/consensus/aura/src/collators/mod.rs
@@ -179,12 +179,19 @@ where
 	let authorities = runtime_api.authorities(parent_hash).ok()?;
 	let author_pub = aura_internal::claim_slot::<P>(para_slot, &authorities, keystore).await?;
 
-	let Ok(Some(api_version)) =
-		runtime_api.api_version::<dyn AuraUnincludedSegmentApi<Block>>(parent_hash)
-	else {
-		return (parent_hash == included_block)
-			.then(|| SlotClaim::unchecked::<P>(author_pub, para_slot, timestamp));
-	};
+	// This function is typically called when we want to build block N. At that point, the
+	// unincluded segment in the runtime is unaware of the hash of block N-1. If the unincluded
+	// segment in the runtime is full, but block N-1 is the included block, the unincluded segment
+	// should have length 0 and we can build. Since the hash is not available to the runtime
+	// however, we need this extra check here.
+	if parent_hash == included_block {
+		return Some(SlotClaim::unchecked::<P>(author_pub, para_slot, timestamp));
+	}
+
+	let api_version = runtime_api
+		.api_version::<dyn AuraUnincludedSegmentApi<Block>>(parent_hash)
+		.ok()
+		.flatten()?;
 
 	let slot = if api_version > 1 { relay_slot } else { para_slot };
 
@@ -243,3 +250,116 @@ where
 		.max_by_key(|a| a.depth)
 		.map(|parent| (included_block, parent))
 }
+
+#[cfg(test)]
+mod tests {
+	use crate::collators::can_build_upon;
+	use codec::Encode;
+	use cumulus_primitives_aura::Slot;
+	use cumulus_primitives_core::BlockT;
+	use cumulus_relay_chain_interface::PHash;
+	use cumulus_test_client::{
+		runtime::{Block, Hash},
+		Client, DefaultTestClientBuilderExt, InitBlockBuilder, TestClientBuilder,
+		TestClientBuilderExt,
+	};
+	use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
+	use polkadot_primitives::HeadData;
+	use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy};
+	use sp_consensus::BlockOrigin;
+	use sp_keystore::{Keystore, KeystorePtr};
+	use sp_timestamp::Timestamp;
+	use std::sync::Arc;
+
+	async fn import_block<I: BlockImport<Block>>(
+		importer: &I,
+		block: Block,
+		origin: BlockOrigin,
+		import_as_best: bool,
+	) {
+		let (header, body) = block.deconstruct();
+
+		let mut block_import_params = BlockImportParams::new(origin, header);
+		block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(import_as_best));
+		block_import_params.body = Some(body);
+		importer.import_block(block_import_params).await.unwrap();
+	}
+
+	fn sproof_with_parent_by_hash(client: &Client, hash: PHash) -> RelayStateSproofBuilder {
+		let header = client.header(hash).ok().flatten().expect("No header for parent block");
+		let included = HeadData(header.encode());
+		let mut builder = RelayStateSproofBuilder::default();
+		builder.para_id = cumulus_test_client::runtime::PARACHAIN_ID.into();
+		builder.included_para_head = Some(included);
+
+		builder
+	}
+	async fn build_and_import_block(client: &Client, included: Hash) -> Block {
+		let sproof = sproof_with_parent_by_hash(client, included);
+
+		let block_builder = client.init_block_builder(None, sproof).block_builder;
+
+		let block = block_builder.build().unwrap().block;
+
+		let origin = BlockOrigin::NetworkInitialSync;
+		import_block(client, block.clone(), origin, true).await;
+		block
+	}
+
+	fn set_up_components() -> (Arc<Client>, KeystorePtr) {
+		let keystore = Arc::new(sp_keystore::testing::MemoryKeystore::new()) as Arc<_>;
+		for key in sp_keyring::Sr25519Keyring::iter() {
+			Keystore::sr25519_generate_new(
+				&*keystore,
+				sp_application_crypto::key_types::AURA,
+				Some(&key.to_seed()),
+			)
+			.expect("Can insert key into MemoryKeyStore");
+		}
+		(Arc::new(TestClientBuilder::new().build()), keystore)
+	}
+
+	/// This tests a special scenario where the unincluded segment in the runtime
+	/// is full. We are calling `can_build_upon`, passing the last built block as the
+	/// included one. In the runtime we will not find the hash of the included block in the
+	/// unincluded segment. The `can_build_upon` runtime API would therefore return `false`, but
+	/// we are ensuring on the node side that we are always able to build on the included block.
+	#[tokio::test]
+	async fn test_can_build_upon() {
+		let (client, keystore) = set_up_components();
+
+		let genesis_hash = client.chain_info().genesis_hash;
+		let mut last_hash = genesis_hash;
+
+		// Fill up the unincluded segment tracker in the runtime.
+		while can_build_upon::<_, _, sp_consensus_aura::sr25519::AuthorityPair>(
+			Slot::from(u64::MAX),
+			Slot::from(u64::MAX),
+			Timestamp::default(),
+			last_hash,
+			genesis_hash,
+			&*client,
+			&keystore,
+		)
+		.await
+		.is_some()
+		{
+			let block = build_and_import_block(&client, genesis_hash).await;
+			last_hash = block.header().hash();
+		}
+
+		// Blocks were built with the genesis hash set as included block.
+		// We call `can_build_upon` with the last built block as the included block.
+		let result = can_build_upon::<_, _, sp_consensus_aura::sr25519::AuthorityPair>(
+			Slot::from(u64::MAX),
+			Slot::from(u64::MAX),
+			Timestamp::default(),
+			last_hash,
+			last_hash,
+			&*client,
+			&keystore,
+		)
+		.await;
+		assert!(result.is_some());
+	}
+}
diff --git a/prdoc/pr_7205.prdoc b/prdoc/pr_7205.prdoc
new file mode 100644
index 00000000000..758beb0b631
--- /dev/null
+++ b/prdoc/pr_7205.prdoc
@@ -0,0 +1,10 @@
+title: 'Collator: Fix `can_build_upon` by always allowing to build on included block'
+doc:
+- audience: Node Dev
+  description: |-
+    Fixes a bug introduced in #6825.
+    We should always allow building on the included block of parachains. In situations where the unincluded segment
+    is full, but the included block moved to the most recent block, building was wrongly disallowed.
+crates:
+- name: cumulus-client-consensus-aura
+  bump: minor
-- 
GitLab


From 4937f779068d1ab947c9eada8e1d3f5b7191eb94 Mon Sep 17 00:00:00 2001
From: seemantaggarwal <32275622+seemantaggarwal@users.noreply.github.com>
Date: Mon, 20 Jan 2025 15:51:29 +0530
Subject: [PATCH 080/116] Use docify export for parachain template hardcoded
 configuration and embed it in its README #6333 (#7093)

Use docify export for parachain template hardcoded configuration and
embed it in its README #6333

Docify currently has a limitation of not being able to embed a
variable/const in its code, without embedding its definition, even if
you do something in a string like

"this is a sample string ${sample_variable}"

It will embed the entire string
"this is a sample string ${sample_variable}"
without replacing the value of sample_variable from the code

Hence, the goal was just to make it obvious in the README where the
PARACHAIN_ID value is coming from, so a note has been added at the start
for the same, so whenever somebody is running these commands, they will
be aware about the value and replace accordingly.

To make it simpler, we added a
rust ignore block so the user can just look it up in the readme itself
and does not have to scan through the runtime directory for the value.

---------

Co-authored-by: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
---
 .github/scripts/generate-prdoc.py             |   4 +-
 .github/workflows/misc-sync-templates.yml     |   6 +
 Cargo.lock                                    | 220 ++++++++-------
 Cargo.toml                                    |   1 +
 prdoc/pr_7093.prdoc                           |   8 +
 templates/parachain/Cargo.toml                |  16 ++
 templates/parachain/README.docify.md          | 254 ++++++++++++++++++
 templates/parachain/README.md                 |  46 ++--
 .../runtime/src/genesis_config_presets.rs     |   1 +
 templates/parachain/src/lib.rs                |  22 ++
 10 files changed, 465 insertions(+), 113 deletions(-)
 create mode 100644 prdoc/pr_7093.prdoc
 create mode 100644 templates/parachain/Cargo.toml
 create mode 100644 templates/parachain/README.docify.md
 create mode 100644 templates/parachain/src/lib.rs

diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py
index 9154f185e64..43e8437a0c9 100644
--- a/.github/scripts/generate-prdoc.py
+++ b/.github/scripts/generate-prdoc.py
@@ -86,10 +86,10 @@ def create_prdoc(pr, audience, title, description, patch, bump, force):
 			if p == '/':
 				exit(1)
 			p = os.path.dirname(p)
-		
+
 		with open(os.path.join(p, "Cargo.toml")) as f:
 			manifest = toml.load(f)
-		
+
 		if not "package" in manifest:
 			continue
 		
diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml
index 8d06d89621d..ac66e697562 100644
--- a/.github/workflows/misc-sync-templates.yml
+++ b/.github/workflows/misc-sync-templates.yml
@@ -131,6 +131,12 @@ jobs:
       - name: Copy over the new changes
         run: |
           cp -r polkadot-sdk/templates/${{ matrix.template }}/* "${{ env.template-path }}/"
+      - name: Remove unnecessary files from parachain template
+        if: ${{ matrix.template == 'parachain' }}
+        run: |
+          rm -f "${{ env.template-path }}/README.docify.md"
+          rm -f "${{ env.template-path }}/Cargo.toml"
+          rm -f "${{ env.template-path }}/src/lib.rs"
 
       - name: Run psvm on monorepo workspace dependencies
         run: psvm -o -v ${{ github.event.inputs.stable_release_branch }} -p ./Cargo.toml
diff --git a/Cargo.lock b/Cargo.lock
index c9a139f3074..0907830c5e7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -36,6 +36,12 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 
+[[package]]
+name = "adler2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
+
 [[package]]
 name = "adler32"
 version = "1.2.0"
@@ -112,9 +118,9 @@ dependencies = [
 
 [[package]]
 name = "aho-corasick"
-version = "1.0.4"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
 dependencies = [
  "memchr",
 ]
@@ -363,23 +369,24 @@ dependencies = [
 
 [[package]]
 name = "anstream"
-version = "0.6.11"
+version = "0.6.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
 dependencies = [
  "anstyle",
  "anstyle-parse",
  "anstyle-query",
  "anstyle-wincon",
  "colorchoice",
+ "is_terminal_polyfill",
  "utf8parse",
 ]
 
 [[package]]
 name = "anstyle"
-version = "1.0.6"
+version = "1.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
 
 [[package]]
 name = "anstyle-parse"
@@ -401,12 +408,12 @@ dependencies = [
 
 [[package]]
 name = "anstyle-wincon"
-version = "3.0.1"
+version = "3.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
+checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
 dependencies = [
  "anstyle",
- "windows-sys 0.48.0",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
@@ -1679,7 +1686,7 @@ dependencies = [
  "cc",
  "cfg-if",
  "libc",
- "miniz_oxide",
+ "miniz_oxide 0.7.1",
  "object 0.32.2",
  "rustc-demangle",
 ]
@@ -3079,12 +3086,12 @@ dependencies = [
 
 [[package]]
 name = "bstr"
-version = "1.6.0"
+version = "1.11.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05"
+checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0"
 dependencies = [
  "memchr",
- "regex-automata 0.3.6",
+ "regex-automata 0.4.8",
  "serde",
 ]
 
@@ -3187,9 +3194,9 @@ dependencies = [
 
 [[package]]
 name = "cargo-platform"
-version = "0.1.3"
+version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479"
+checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea"
 dependencies = [
  "serde",
 ]
@@ -3202,7 +3209,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a"
 dependencies = [
  "camino",
  "cargo-platform",
- "semver 1.0.18",
+ "semver 1.0.24",
  "serde",
  "serde_json",
  "thiserror",
@@ -3487,12 +3494,12 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.5.13"
+version = "4.5.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc"
+checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783"
 dependencies = [
  "clap_builder",
- "clap_derive 4.5.13",
+ "clap_derive 4.5.24",
 ]
 
 [[package]]
@@ -3506,24 +3513,24 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.13"
+version = "4.5.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99"
+checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121"
 dependencies = [
  "anstream",
  "anstyle",
- "clap_lex 0.7.0",
+ "clap_lex 0.7.4",
  "strsim 0.11.1",
  "terminal_size",
 ]
 
 [[package]]
 name = "clap_complete"
-version = "4.5.13"
+version = "4.5.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa3c596da3cf0983427b0df0dba359df9182c13bd5b519b585a482b0c351f4e8"
+checksum = "33a7e468e750fa4b6be660e8b5651ad47372e8fb114030b594c2d75d48c5ffd0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
 ]
 
 [[package]]
@@ -3541,9 +3548,9 @@ dependencies = [
 
 [[package]]
 name = "clap_derive"
-version = "4.5.13"
+version = "4.5.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0"
+checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c"
 dependencies = [
  "heck 0.5.0",
  "proc-macro2 1.0.86",
@@ -3562,9 +3569,9 @@ dependencies = [
 
 [[package]]
 name = "clap_lex"
-version = "0.7.0"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
+checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
 
 [[package]]
 name = "cmd_lib"
@@ -3750,23 +3757,23 @@ dependencies = [
 
 [[package]]
 name = "color-print"
-version = "0.3.4"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2a5e6504ed8648554968650feecea00557a3476bc040d0ffc33080e66b646d0"
+checksum = "3aa954171903797d5623e047d9ab69d91b493657917bdfb8c2c80ecaf9cdb6f4"
 dependencies = [
  "color-print-proc-macro",
 ]
 
 [[package]]
 name = "color-print-proc-macro"
-version = "0.3.4"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b"
+checksum = "692186b5ebe54007e45a59aea47ece9eb4108e141326c304cdc91699a7118a22"
 dependencies = [
  "nom",
  "proc-macro2 1.0.86",
  "quote 1.0.37",
- "syn 1.0.109",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -4441,7 +4448,7 @@ dependencies = [
  "anes",
  "cast",
  "ciborium",
- "clap 4.5.13",
+ "clap 4.5.26",
  "criterion-plot",
  "futures",
  "is-terminal",
@@ -4586,7 +4593,7 @@ dependencies = [
 name = "cumulus-client-cli"
 version = "0.7.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "parity-scale-codec",
  "sc-chain-spec",
  "sc-cli",
@@ -5250,7 +5257,7 @@ name = "cumulus-pov-validator"
 version = "0.1.0"
 dependencies = [
  "anyhow",
- "clap 4.5.13",
+ "clap 4.5.26",
  "parity-scale-codec",
  "polkadot-node-primitives",
  "polkadot-parachain-primitives 6.0.0",
@@ -5690,7 +5697,7 @@ name = "cumulus-test-service"
 version = "0.1.0"
 dependencies = [
  "async-trait",
- "clap 4.5.13",
+ "clap 4.5.26",
  "criterion",
  "cumulus-client-cli",
  "cumulus-client-collator",
@@ -5784,9 +5791,9 @@ dependencies = [
 
 [[package]]
 name = "curl-sys"
-version = "0.4.72+curl-8.6.0"
+version = "0.4.78+curl-8.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea"
+checksum = "8eec768341c5c7789611ae51cf6c459099f22e64a5d5d0ce4892434e33821eaf"
 dependencies = [
  "cc",
  "libc",
@@ -6938,14 +6945,14 @@ dependencies = [
 
 [[package]]
 name = "filetime"
-version = "0.2.22"
+version = "0.2.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0"
+checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
 dependencies = [
  "cfg-if",
  "libc",
- "redox_syscall 0.3.5",
- "windows-sys 0.48.0",
+ "libredox",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
@@ -7022,12 +7029,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
 
 [[package]]
 name = "flate2"
-version = "1.0.27"
+version = "1.0.35"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010"
+checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c"
 dependencies = [
  "crc32fast",
- "miniz_oxide",
+ "miniz_oxide 0.8.2",
 ]
 
 [[package]]
@@ -7180,7 +7187,7 @@ dependencies = [
  "Inflector",
  "array-bytes",
  "chrono",
- "clap 4.5.13",
+ "clap 4.5.26",
  "comfy-table",
  "cumulus-client-parachain-inherent",
  "cumulus-primitives-proof-size-hostfunction 0.2.0",
@@ -7346,7 +7353,7 @@ dependencies = [
 name = "frame-election-solution-type-fuzzer"
 version = "2.0.0-alpha.5"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "frame-election-provider-solution-type 13.0.0",
  "frame-election-provider-support 28.0.0",
  "frame-support 28.0.0",
@@ -7479,7 +7486,7 @@ name = "frame-omni-bencher"
 version = "0.1.0"
 dependencies = [
  "assert_cmd",
- "clap 4.5.13",
+ "clap 4.5.26",
  "cumulus-primitives-proof-size-hostfunction 0.2.0",
  "cumulus-test-runtime",
  "frame-benchmarking-cli",
@@ -9194,6 +9201,12 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
+
 [[package]]
 name = "isahc"
 version = "1.7.2"
@@ -10285,6 +10298,17 @@ dependencies = [
  "yamux 0.13.3",
 ]
 
+[[package]]
+name = "libredox"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
+dependencies = [
+ "bitflags 2.6.0",
+ "libc",
+ "redox_syscall 0.5.8",
+]
+
 [[package]]
 name = "librocksdb-sys"
 version = "0.11.0+8.1.1"
@@ -10361,9 +10385,9 @@ dependencies = [
 
 [[package]]
 name = "libz-sys"
-version = "1.1.12"
+version = "1.1.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b"
+checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa"
 dependencies = [
  "cc",
  "libc",
@@ -10832,7 +10856,7 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
 name = "minimal-template-node"
 version = "0.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "docify",
  "futures",
  "futures-timer",
@@ -10862,6 +10886,15 @@ dependencies = [
  "adler",
 ]
 
+[[package]]
+name = "miniz_oxide"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394"
+dependencies = [
+ "adler2",
+]
+
 [[package]]
 name = "mio"
 version = "1.0.2"
@@ -11339,7 +11372,7 @@ version = "0.9.0-dev"
 dependencies = [
  "array-bytes",
  "async-trait",
- "clap 4.5.13",
+ "clap 4.5.26",
  "derive_more 0.99.17",
  "fs_extra",
  "futures",
@@ -11415,7 +11448,7 @@ dependencies = [
 name = "node-runtime-generate-bags"
 version = "3.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "generate-bags",
  "kitchensink-runtime",
 ]
@@ -11424,7 +11457,7 @@ dependencies = [
 name = "node-template-release"
 version = "3.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "flate2",
  "fs_extra",
  "glob",
@@ -14916,7 +14949,7 @@ name = "pallet-revive-eth-rpc"
 version = "0.1.0"
 dependencies = [
  "anyhow",
- "clap 4.5.13",
+ "clap 4.5.26",
  "env_logger 0.11.3",
  "ethabi",
  "futures",
@@ -16252,11 +16285,18 @@ dependencies = [
  "staging-xcm-builder 17.0.1",
 ]
 
+[[package]]
+name = "parachain-template"
+version = "0.0.0"
+dependencies = [
+ "docify",
+]
+
 [[package]]
 name = "parachain-template-node"
 version = "0.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "color-print",
  "docify",
  "futures",
@@ -17240,7 +17280,7 @@ name = "polkadot-cli"
 version = "7.0.0"
 dependencies = [
  "cfg-if",
- "clap 4.5.13",
+ "clap 4.5.26",
  "frame-benchmarking-cli",
  "futures",
  "log",
@@ -18111,7 +18151,7 @@ version = "0.1.0"
 dependencies = [
  "assert_cmd",
  "async-trait",
- "clap 4.5.13",
+ "clap 4.5.26",
  "color-print",
  "cumulus-client-cli",
  "cumulus-client-collator",
@@ -19629,7 +19669,7 @@ dependencies = [
  "async-trait",
  "bincode",
  "bitvec",
- "clap 4.5.13",
+ "clap 4.5.26",
  "clap-num",
  "color-eyre",
  "colored",
@@ -19731,7 +19771,7 @@ version = "1.0.0"
 dependencies = [
  "assert_matches",
  "async-trait",
- "clap 4.5.13",
+ "clap 4.5.26",
  "color-eyre",
  "futures",
  "futures-timer",
@@ -19873,7 +19913,7 @@ dependencies = [
 name = "polkadot-voter-bags"
 version = "7.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "generate-bags",
  "sp-io 30.0.0",
  "westend-runtime",
@@ -21197,12 +21237,6 @@ dependencies = [
  "regex-syntax 0.6.29",
 ]
 
-[[package]]
-name = "regex-automata"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"
-
 [[package]]
 name = "regex-automata"
 version = "0.4.8"
@@ -21302,7 +21336,7 @@ dependencies = [
 name = "remote-ext-tests-bags-list"
 version = "1.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "frame-system 28.0.0",
  "log",
  "pallet-bags-list-remote-tests",
@@ -21915,7 +21949,7 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
 dependencies = [
- "semver 1.0.18",
+ "semver 1.0.24",
 ]
 
 [[package]]
@@ -22334,7 +22368,7 @@ name = "sc-chain-spec"
 version = "28.0.0"
 dependencies = [
  "array-bytes",
- "clap 4.5.13",
+ "clap 4.5.26",
  "docify",
  "log",
  "memmap2 0.9.3",
@@ -22377,7 +22411,7 @@ version = "0.36.0"
 dependencies = [
  "array-bytes",
  "chrono",
- "clap 4.5.13",
+ "clap 4.5.26",
  "fdlimit",
  "futures",
  "futures-timer",
@@ -23725,7 +23759,7 @@ dependencies = [
 name = "sc-storage-monitor"
 version = "0.16.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "fs4",
  "log",
  "sp-core 28.0.0",
@@ -24299,9 +24333,9 @@ dependencies = [
 
 [[package]]
 name = "semver"
-version = "1.0.18"
+version = "1.0.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918"
+checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba"
 dependencies = [
  "serde",
 ]
@@ -25613,7 +25647,7 @@ dependencies = [
 name = "solochain-template-node"
 version = "0.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "frame-benchmarking-cli",
  "frame-metadata-hash-extension 0.1.0",
  "frame-system 28.0.0",
@@ -26996,7 +27030,7 @@ dependencies = [
 name = "sp-npos-elections-fuzzer"
 version = "2.0.0-alpha.5"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "honggfuzz",
  "rand",
  "sp-npos-elections 26.0.0",
@@ -28220,7 +28254,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
 name = "staging-chain-spec-builder"
 version = "1.6.1"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "cmd_lib",
  "docify",
  "log",
@@ -28237,7 +28271,7 @@ version = "3.0.0-dev"
 dependencies = [
  "array-bytes",
  "assert_cmd",
- "clap 4.5.13",
+ "clap 4.5.26",
  "clap_complete",
  "criterion",
  "futures",
@@ -28274,7 +28308,7 @@ dependencies = [
 name = "staging-node-inspect"
 version = "0.12.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "parity-scale-codec",
  "sc-cli",
  "sc-client-api",
@@ -28619,7 +28653,7 @@ dependencies = [
 name = "subkey"
 version = "9.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "sc-cli",
 ]
 
@@ -29045,7 +29079,7 @@ dependencies = [
  "rand",
  "reqwest 0.12.9",
  "scale-info",
- "semver 1.0.18",
+ "semver 1.0.24",
  "serde",
  "serde_json",
  "sp-version 35.0.0",
@@ -29458,9 +29492,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
 
 [[package]]
 name = "tar"
-version = "0.4.40"
+version = "0.4.43"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb"
+checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6"
 dependencies = [
  "filetime",
  "libc",
@@ -29508,12 +29542,12 @@ dependencies = [
 
 [[package]]
 name = "terminal_size"
-version = "0.3.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7"
+checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9"
 dependencies = [
  "rustix 0.38.42",
- "windows-sys 0.48.0",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
@@ -29560,7 +29594,7 @@ dependencies = [
 name = "test-parachain-adder-collator"
 version = "1.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "futures",
  "futures-timer",
  "log",
@@ -29607,7 +29641,7 @@ dependencies = [
 name = "test-parachain-undying-collator"
 version = "1.0.0"
 dependencies = [
- "clap 4.5.13",
+ "clap 4.5.26",
  "futures",
  "futures-timer",
  "log",
@@ -31903,11 +31937,13 @@ dependencies = [
 
 [[package]]
 name = "xattr"
-version = "1.0.1"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4686009f71ff3e5c4dbcf1a282d0a44db3f021ba69350cd42086b3e5f1c6985"
+checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909"
 dependencies = [
  "libc",
+ "linux-raw-sys 0.4.14",
+ "rustix 0.38.42",
 ]
 
 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index e17f08148b1..18c1dd2c68d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -538,6 +538,7 @@ members = [
 	"templates/minimal/node",
 	"templates/minimal/pallets/template",
 	"templates/minimal/runtime",
+	"templates/parachain",
 	"templates/parachain/node",
 	"templates/parachain/pallets/template",
 	"templates/parachain/runtime",
diff --git a/prdoc/pr_7093.prdoc b/prdoc/pr_7093.prdoc
new file mode 100644
index 00000000000..cad4477e883
--- /dev/null
+++ b/prdoc/pr_7093.prdoc
@@ -0,0 +1,8 @@
+title: 'initial docify readme with some content #6333'
+doc:
+- audience: Runtime Dev
+  description: |
+      Docifying the README.MD under templates/parachain by adding a Docify.
+      Also Adding the Cargo.toml under the same folder, essentially making it a crate as Docify acts
+      for Readmes only under the same crate.
+crates: [ ]
diff --git a/templates/parachain/Cargo.toml b/templates/parachain/Cargo.toml
new file mode 100644
index 00000000000..84b9d5e29bb
--- /dev/null
+++ b/templates/parachain/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "parachain-template"
+description = "A parachain-template helper crate to keep documentation in sync with the template's components."
+version = "0.0.0"
+license = "Unlicense"
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+edition.workspace = true
+publish = false
+
+[dependencies]
+docify = "0.2.9"
+
+[features]
+generate-readme = []
diff --git a/templates/parachain/README.docify.md b/templates/parachain/README.docify.md
new file mode 100644
index 00000000000..47385e0bbf1
--- /dev/null
+++ b/templates/parachain/README.docify.md
@@ -0,0 +1,254 @@
+<div align="center">
+
+# Polkadot SDK's Parachain Template
+
+<img height="70px" alt="Polkadot SDK Logo" src="https://github.com/paritytech/polkadot-sdk/raw/master/docs/images/Polkadot_Logo_Horizontal_Pink_White.png#gh-dark-mode-only"/>
+<img height="70px" alt="Polkadot SDK Logo" src="https://github.com/paritytech/polkadot-sdk/raw/master/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png#gh-light-mode-only"/>
+
+> This is a template for creating a [parachain](https://wiki.polkadot.network/docs/learn-parachains) based on Polkadot SDK.
+>
+> This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk).
+
+</div>
+
+## Table of Contents
+
+- [Intro](#intro)
+
+- [Template Structure](#template-structure)
+
+- [Getting Started](#getting-started)
+
+- [Starting a Development Chain](#starting-a-development-chain)
+
+  - [Omni Node](#omni-node-prerequisites)
+  - [Zombienet setup with Omni Node](#zombienet-setup-with-omni-node)
+  - [Parachain Template Node](#parachain-template-node)
+  - [Connect with the Polkadot-JS Apps Front-End](#connect-with-the-polkadot-js-apps-front-end)
+  - [Takeaways](#takeaways)
+
+- [Runtime development](#runtime-development)
+- [Contributing](#contributing)
+- [Getting Help](#getting-help)
+
+## Intro
+
+- ⏫ This template provides a starting point to build a [parachain](https://wiki.polkadot.network/docs/learn-parachains).
+
+- ☁️ It is based on the
+  [Cumulus](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) framework.
+
+- 🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets
+  such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html).
+
+- 👉 Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains)
+
+## Template Structure
+
+A Polkadot SDK based project such as this one consists of:
+
+- 🧮 the [Runtime](./runtime/README.md) - the core logic of the parachain.
+- 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed.
+- 💿 a [Node](./node/README.md) - the binary application, not part of the project default-members list and not compiled unless
+  building the project with `--workspace` flag, which builds all workspace members, and is an alternative to
+  [Omni Node](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html).
+
+## Getting Started
+
+- 🦀 The template is using the Rust language.
+
+- 👉 Check the
+  [Rust installation instructions](https://www.rust-lang.org/tools/install) for your system.
+
+- 🛠️ Depending on your operating system and Rust version, there might be additional
+  packages required to compile this template - please take note of the Rust compiler output.
+
+Fetch parachain template code:
+
+```sh
+git clone https://github.com/paritytech/polkadot-sdk-parachain-template.git parachain-template
+
+cd parachain-template
+```
+
+## Starting a Development Chain
+
+The parachain template relies on a hardcoded parachain id which is defined in the runtime code
+and referenced throughout the contents of this file as `{{PARACHAIN_ID}}`. Please replace
+any command or file referencing this placeholder with the value of the `PARACHAIN_ID` constant:
+
+<!-- docify::embed!("runtime/src/genesis_config_presets.rs", PARACHAIN_ID)-->
+
+### Omni Node Prerequisites
+
+[Omni Node](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html) can
+be used to run the parachain template's runtime. `polkadot-omni-node` binary crate usage is described at a high-level
+[on crates.io](https://crates.io/crates/polkadot-omni-node).
+
+#### Install `polkadot-omni-node`
+
+Please see the installation section at [`crates.io/omni-node`](https://crates.io/crates/polkadot-omni-node).
+
+#### Build `parachain-template-runtime`
+
+```sh
+cargo build --release
+```
+
+#### Install `staging-chain-spec-builder`
+
+Please see the installation section at [`crates.io/staging-chain-spec-builder`](https://crates.io/crates/staging-chain-spec-builder).
+
+#### Use `chain-spec-builder` to generate the `chain_spec.json` file
+
+```sh
+chain-spec-builder create --relay-chain "rococo-local" --para-id {{PARACHAIN_ID}} --runtime \
+    target/release/wbuild/parachain-template-runtime/parachain_template_runtime.wasm named-preset development
+```
+
+**Note**: the `relay-chain` and `para-id` flags are mandatory information required by
+Omni Node, and for parachain template case the value for `para-id` must be set to `{{PARACHAIN_ID}}`, since this
+is also the value injected through [ParachainInfo](https://docs.rs/staging-parachain-info/0.17.0/staging_parachain_info/)
+pallet into the `parachain-template-runtime`'s storage. The `relay-chain` value is set in accordance
+with the relay chain ID where this instantiation of parachain-template will connect to.
+
+#### Run Omni Node
+
+Start Omni Node with the generated chain spec. We'll start it in development mode (without a relay chain config), producing
+and finalizing blocks based on manual seal, configured below to seal a block with each second.
+
+```bash
+polkadot-omni-node --chain <path/to/chain_spec.json> --dev --dev-block-time 1000
+```
+
+However, such a setup is not close to what would run in production, and for that we need to setup a local
+relay chain network that will help with the block finalization. In this guide we'll setup a local relay chain
+as well. We'll not do it manually, by starting one node at a time, but we'll use [zombienet](https://paritytech.github.io/zombienet/intro.html).
+
+Follow through the next section for more details on how to do it.
+
+### Zombienet setup with Omni Node
+
+Assuming we continue from the last step of the previous section, we have a chain spec and we need to setup a relay chain.
+We can install `zombienet` as described [here](https://paritytech.github.io/zombienet/install.html#installation), and
+`zombienet-omni-node.toml` contains the network specification we want to start.
+
+#### Relay chain prerequisites
+
+Download the `polkadot` (and the accompanying `polkadot-prepare-worker` and `polkadot-execute-worker`) binaries from
+[Polkadot SDK releases](https://github.com/paritytech/polkadot-sdk/releases). Then expose them on `PATH` like so:
+
+```sh
+export PATH="$PATH:<path/to/binaries>"
+```
+
+#### Update `zombienet-omni-node.toml` with a valid chain spec path
+
+```toml
+# ...
+[[parachains]]
+id = {{PARACHAIN_ID}}
+chain_spec_path = "<TO BE UPDATED WITH A VALID PATH>"
+# ...
+```
+
+#### Start the network
+
+```sh
+zombienet --provider native spawn zombienet-omni-node.toml
+```
+
+### Parachain Template Node
+
+As mentioned in the `Template Structure` section, the `node` crate is optionally compiled and it is an alternative
+to `Omni Node`. Similarly, it requires setting up a relay chain, and we'll use `zombienet` once more.
+
+#### Install the `parachain-template-node`
+
+```sh
+cargo install --path node
+```
+
+#### Setup and start the network
+
+For setup, please consider the instructions for `zombienet` installation [here](https://paritytech.github.io/zombienet/install.html#installation)
+and [relay chain prerequisites](#relay-chain-prerequisites).
+
+We're left just with starting the network:
+
+```sh
+zombienet --provider native spawn zombienet.toml
+```
+
+### Connect with the Polkadot-JS Apps Front-End
+
+- 🌐 You can interact with your local node using the
+  hosted version of the Polkadot/Substrate Portal:
+  [relay chain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944)
+  and [parachain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9988).
+
+- 🪐 A hosted version is also
+  available on [IPFS](https://dotapps.io/).
+
+- 🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the
+  [`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository.
+
+### Takeaways
+
+Development parachains:
+
+- 🔗 Connect to relay chains, and we showcased how to connect to a local one.
+- 🧹 Do not persist the state.
+- 💰 Are preconfigured with a genesis state that includes several prefunded development accounts.
+- 🧑‍⚖️ Development accounts are used as validators, collators, and `sudo` accounts.
+
+## Runtime development
+
+We recommend using [`chopsticks`](https://github.com/AcalaNetwork/chopsticks) when the focus is more on the runtime
+development and `OmniNode` is enough as is.
+
+### Install chopsticks
+
+To use `chopsticks`, please install the latest version according to the installation [guide](https://github.com/AcalaNetwork/chopsticks?tab=readme-ov-file#install).
+
+### Build a raw chain spec
+
+Build the `parachain-template-runtime` as mentioned before in this guide and use `chain-spec-builder`
+again but this time by passing `--raw-storage` flag:
+
+```sh
+chain-spec-builder create --raw-storage --relay-chain "rococo-local" --para-id {{PARACHAIN_ID}} --runtime \
+    target/release/wbuild/parachain-template-runtime/parachain_template_runtime.wasm named-preset development
+```
+
+### Start `chopsticks` with the chain spec
+
+```sh
+npx @acala-network/chopsticks@latest --chain-spec <path/to/chain_spec.json>
+```
+
+### Alternatives
+
+`OmniNode` can be still used for runtime development if using the `--dev` flag, while `parachain-template-node` doesn't
+support it at this moment. It can still be used to test a runtime in a full setup where it is started alongside a
+relay chain network (see [Parachain Template node](#parachain-template-node) setup).
+
+## Contributing
+
+- 🔄 This template is automatically updated after releases in the main [Polkadot SDK monorepo](https://github.com/paritytech/polkadot-sdk).
+
+- ➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain).
+
+- 😇 Please refer to the monorepo's
+  [contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and
+  [Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md).
+
+## Getting Help
+
+- 🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point.
+
+- 🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are
+  the Polkadot SDK documentation resources.
+
+- 👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and
+  [Substrate StackExchange](https://substrate.stackexchange.com/).
diff --git a/templates/parachain/README.md b/templates/parachain/README.md
index c1e333df9e9..15e9f7fe61c 100644
--- a/templates/parachain/README.md
+++ b/templates/parachain/README.md
@@ -36,10 +36,10 @@
 - ⏫ This template provides a starting point to build a [parachain](https://wiki.polkadot.network/docs/learn-parachains).
 
 - ☁️ It is based on the
-[Cumulus](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) framework.
+  [Cumulus](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/cumulus/index.html) framework.
 
 - 🔧 Its runtime is configured with a single custom pallet as a starting point, and a handful of ready-made pallets
-such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html).
+  such as a [Balances pallet](https://paritytech.github.io/polkadot-sdk/master/pallet_balances/index.html).
 
 - 👉 Learn more about parachains [here](https://wiki.polkadot.network/docs/learn-parachains)
 
@@ -50,18 +50,18 @@ A Polkadot SDK based project such as this one consists of:
 - 🧮 the [Runtime](./runtime/README.md) - the core logic of the parachain.
 - 🎨 the [Pallets](./pallets/README.md) - from which the runtime is constructed.
 - 💿 a [Node](./node/README.md) - the binary application, not part of the project default-members list and not compiled unless
-building the project with `--workspace` flag, which builds all workspace members, and is an alternative to
-[Omni Node](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html).
+  building the project with `--workspace` flag, which builds all workspace members, and is an alternative to
+  [Omni Node](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html).
 
 ## Getting Started
 
 - 🦀 The template is using the Rust language.
 
 - 👉 Check the
-[Rust installation instructions](https://www.rust-lang.org/tools/install) for your system.
+  [Rust installation instructions](https://www.rust-lang.org/tools/install) for your system.
 
 - 🛠️ Depending on your operating system and Rust version, there might be additional
-packages required to compile this template - please take note of the Rust compiler output.
+  packages required to compile this template - please take note of the Rust compiler output.
 
 Fetch parachain template code:
 
@@ -73,6 +73,14 @@ cd parachain-template
 
 ## Starting a Development Chain
 
+The parachain template relies on a hardcoded parachain id which is defined in the runtime code
+and referenced throughout the contents of this file as `{{PARACHAIN_ID}}`. Please replace
+any command or file referencing this placeholder with the value of the `PARACHAIN_ID` constant:
+
+```rust,ignore
+pub const PARACHAIN_ID: u32 = 1000;
+```
+
 ### Omni Node Prerequisites
 
 [Omni Node](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/reference_docs/omni_node/index.html) can
@@ -96,12 +104,12 @@ Please see the installation section at [`crates.io/staging-chain-spec-builder`](
 #### Use `chain-spec-builder` to generate the `chain_spec.json` file
 
 ```sh
-chain-spec-builder create --relay-chain "rococo-local" --para-id 1000 --runtime \
+chain-spec-builder create --relay-chain "rococo-local" --para-id {{PARACHAIN_ID}} --runtime \
     target/release/wbuild/parachain-template-runtime/parachain_template_runtime.wasm named-preset development
 ```
 
 **Note**: the `relay-chain` and `para-id` flags are mandatory information required by
-Omni Node, and for parachain template case the value for `para-id` must be set to `1000`, since this
+Omni Node, and for parachain template case the value for `para-id` must be set to `{{PARACHAIN_ID}}`, since this
 is also the value injected through [ParachainInfo](https://docs.rs/staging-parachain-info/0.17.0/staging_parachain_info/)
 pallet into the `parachain-template-runtime`'s storage. The `relay-chain` value is set in accordance
 with the relay chain ID where this instantiation of parachain-template will connect to.
@@ -141,7 +149,7 @@ export PATH="$PATH:<path/to/binaries>"
 ```toml
 # ...
 [[parachains]]
-id = 1000
+id = {{PARACHAIN_ID}}
 chain_spec_path = "<TO BE UPDATED WITH A VALID PATH>"
 # ...
 ```
@@ -177,15 +185,15 @@ zombienet --provider native spawn zombienet.toml
 ### Connect with the Polkadot-JS Apps Front-End
 
 - 🌐 You can interact with your local node using the
-hosted version of the Polkadot/Substrate Portal:
-[relay chain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944)
-and [parachain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9988).
+  hosted version of the Polkadot/Substrate Portal:
+  [relay chain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944)
+  and [parachain](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9988).
 
 - 🪐 A hosted version is also
-available on [IPFS](https://dotapps.io/).
+  available on [IPFS](https://dotapps.io/).
 
 - 🧑‍🔧 You can also find the source code and instructions for hosting your own instance in the
-[`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository.
+  [`polkadot-js/apps`](https://github.com/polkadot-js/apps) repository.
 
 ### Takeaways
 
@@ -211,7 +219,7 @@ Build the `parachain-template-runtime` as mentioned before in this guide and use
 again but this time by passing `--raw-storage` flag:
 
 ```sh
-chain-spec-builder create --raw-storage --relay-chain "rococo-local" --para-id 1000 --runtime \
+chain-spec-builder create --raw-storage --relay-chain "rococo-local" --para-id {{PARACHAIN_ID}} --runtime \
     target/release/wbuild/parachain-template-runtime/parachain_template_runtime.wasm named-preset development
 ```
 
@@ -234,15 +242,15 @@ relay chain network (see [Parachain Template node](#parachain-template-node) set
 - ➡️ Any pull requests should be directed to this [source](https://github.com/paritytech/polkadot-sdk/tree/master/templates/parachain).
 
 - 😇 Please refer to the monorepo's
-[contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and
-[Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md).
+  [contribution guidelines](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and
+  [Code of Conduct](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CODE_OF_CONDUCT.md).
 
 ## Getting Help
 
 - 🧑‍🏫 To learn about Polkadot in general, [Polkadot.network](https://polkadot.network/) website is a good starting point.
 
 - 🧑‍🔧 For technical introduction, [here](https://github.com/paritytech/polkadot-sdk#-documentation) are
-the Polkadot SDK documentation resources.
+  the Polkadot SDK documentation resources.
 
 - 👥 Additionally, there are [GitHub issues](https://github.com/paritytech/polkadot-sdk/issues) and
-[Substrate StackExchange](https://substrate.stackexchange.com/).
+  [Substrate StackExchange](https://substrate.stackexchange.com/).
diff --git a/templates/parachain/runtime/src/genesis_config_presets.rs b/templates/parachain/runtime/src/genesis_config_presets.rs
index f1b24e43724..8cdadca5060 100644
--- a/templates/parachain/runtime/src/genesis_config_presets.rs
+++ b/templates/parachain/runtime/src/genesis_config_presets.rs
@@ -17,6 +17,7 @@ use sp_keyring::Sr25519Keyring;
 /// The default XCM version to set in genesis config.
 const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION;
 /// Parachain id used for genesis config presets of parachain template.
+#[docify::export_content]
 pub const PARACHAIN_ID: u32 = 1000;
 
 /// Generate the session keys from individual elements.
diff --git a/templates/parachain/src/lib.rs b/templates/parachain/src/lib.rs
new file mode 100644
index 00000000000..d3c5b8ba310
--- /dev/null
+++ b/templates/parachain/src/lib.rs
@@ -0,0 +1,22 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
+
+// The parachain-template crate helps with keeping the README.md in sync
+// with code sections across the components under the template: node,
+// pallets & runtime, by using `docify`.
+
+#[cfg(feature = "generate-readme")]
+docify::compile_markdown!("README.docify.md", "README.md");
-- 
GitLab


From d5d9b1276a088a6bd7a8c2c698320dad3d0ee2c4 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert <skunert49@gmail.com>
Date: Mon, 20 Jan 2025 12:02:59 +0100
Subject: [PATCH 081/116] Stabilize
 `ensure_execute_processes_have_correct_num_threads` test (#7253)

Saw this test flake a few times, last time
[here](https://github.com/paritytech/polkadot-sdk/actions/runs/12834432188/job/35791830215).

We first fetch all processes in the test, then query `/proc/<pid>/stat`
for every one of them. When the file was not found, we would error. Now
we tolerate not finding this file. Ran 200 times locally without error;
before this change it would fail a few times, probably depending on process
fluctuation (which I expect to be high on CI runners).
---
 polkadot/node/core/pvf/tests/it/process.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs
index 353367b394f..29326365b5b 100644
--- a/polkadot/node/core/pvf/tests/it/process.rs
+++ b/polkadot/node/core/pvf/tests/it/process.rs
@@ -77,7 +77,9 @@ fn find_process_by_sid_and_name(
 
 	let mut found = None;
 	for process in all_processes {
-		let stat = process.stat().expect("/proc existed above. Potential race occurred");
+		let Ok(stat) = process.stat() else {
+			continue;
+		};
 
 		if stat.session != sid || !process.exe().unwrap().to_str().unwrap().contains(exe_name) {
 			continue
-- 
GitLab


From ea27696aeed8e76cfb82492f6f3665948d766fe5 Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Mon, 20 Jan 2025 12:47:29 +0100
Subject: [PATCH 082/116] [pallet-revive] eth-rpc error logging (#7251)

Log error instead of failing with an error when block processing fails

---------

Co-authored-by: command-bot <>
---
 .../runtimes/assets/asset-hub-westend/src/lib.rs           | 2 +-
 prdoc/pr_7251.prdoc                                        | 7 +++++++
 substrate/frame/revive/rpc/src/client.rs                   | 4 +++-
 3 files changed, 11 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_7251.prdoc

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 41f29fe2c56..f56c4568f2d 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -129,7 +129,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: alloc::borrow::Cow::Borrowed("westmint"),
 	impl_name: alloc::borrow::Cow::Borrowed("westmint"),
 	authoring_version: 1,
-	spec_version: 1_017_004,
+	spec_version: 1_017_005,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 16,
diff --git a/prdoc/pr_7251.prdoc b/prdoc/pr_7251.prdoc
new file mode 100644
index 00000000000..98e371dc940
--- /dev/null
+++ b/prdoc/pr_7251.prdoc
@@ -0,0 +1,7 @@
+title: '[pallet-revive] eth-rpc error logging'
+doc:
+- audience: Runtime Dev
+  description: Log error instead of failing with an error when block processing fails
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs
index c61c5871f76..a5a022f9722 100644
--- a/substrate/frame/revive/rpc/src/client.rs
+++ b/substrate/frame/revive/rpc/src/client.rs
@@ -310,7 +310,9 @@ impl Client {
 			};
 
 			log::debug!(target: LOG_TARGET, "Pushing block: {}", block.number());
-			callback(block).await?;
+			if let Err(err) = callback(block).await {
+				log::error!(target: LOG_TARGET, "Failed to process block: {err:?}");
+			}
 		}
 
 		log::info!(target: LOG_TARGET, "Block subscription ended");
-- 
GitLab


From 115ff4e98ecc301a3d380a2fc53ec2304647c69d Mon Sep 17 00:00:00 2001
From: Branislav Kontur <bkontur@gmail.com>
Date: Mon, 20 Jan 2025 13:48:25 +0100
Subject: [PATCH 083/116] Apply a few minor fixes found while addressing the
 fellows PR for weights. (#7098)

This PR addresses a few minor issues found while working on the
polkadot-fellows PR
[https://github.com/polkadot-fellows/runtimes/pull/522](https://github.com/polkadot-fellows/runtimes/pull/522):
- Incorrect generic type for `InboundLaneData` in
`check_message_lane_weights`.
- Renaming leftovers: `assigner_on_demand` -> `on_demand`.
---
 bridges/bin/runtime-common/src/integrity.rs | 11 +++++++----
 bridges/modules/messages/src/lib.rs         |  4 ++--
 polkadot/runtime/rococo/src/lib.rs          |  2 +-
 3 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/bridges/bin/runtime-common/src/integrity.rs b/bridges/bin/runtime-common/src/integrity.rs
index 535f1a26e5e..61dbf09109a 100644
--- a/bridges/bin/runtime-common/src/integrity.rs
+++ b/bridges/bin/runtime-common/src/integrity.rs
@@ -21,11 +21,11 @@
 
 use bp_header_chain::ChainWithGrandpa;
 use bp_messages::{ChainWithMessages, InboundLaneData, MessageNonce};
-use bp_runtime::Chain;
+use bp_runtime::{AccountIdOf, Chain};
 use codec::Encode;
 use frame_support::{storage::generator::StorageValue, traits::Get, weights::Weight};
 use frame_system::limits;
-use pallet_bridge_messages::WeightInfoExt as _;
+use pallet_bridge_messages::{ThisChainOf, WeightInfoExt as _};
 
 // Re-export to avoid include all dependencies everywhere.
 #[doc(hidden)]
@@ -364,8 +364,11 @@ pub fn check_message_lane_weights<
 	);
 
 	// check that weights allow us to receive delivery confirmations
-	let max_incoming_inbound_lane_data_proof_size =
-		InboundLaneData::<()>::encoded_size_hint_u32(this_chain_max_unrewarded_relayers as _);
+	let max_incoming_inbound_lane_data_proof_size = InboundLaneData::<
+		AccountIdOf<ThisChainOf<T, MessagesPalletInstance>>,
+	>::encoded_size_hint_u32(
+		this_chain_max_unrewarded_relayers as _
+	);
 	pallet_bridge_messages::ensure_able_to_receive_confirmation::<Weights<T, MessagesPalletInstance>>(
 		C::max_extrinsic_size(),
 		C::max_extrinsic_weight(),
diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs
index af14257db99..61763186cb0 100644
--- a/bridges/modules/messages/src/lib.rs
+++ b/bridges/modules/messages/src/lib.rs
@@ -230,8 +230,8 @@ pub mod pallet {
 			// why do we need to know the weight of this (`receive_messages_proof`) call? Because
 			// we may want to return some funds for not-dispatching (or partially dispatching) some
 			// messages to the call origin (relayer). And this is done by returning actual weight
-			// from the call. But we only know dispatch weight of every messages. So to refund
-			// relayer because we have not dispatched Message, we need to:
+			// from the call. But we only know dispatch weight of every message. So to refund
+			// relayer because we have not dispatched message, we need to:
 			//
 			// ActualWeight = DeclaredWeight - Message.DispatchWeight
 			//
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index b3f2a003327..c2c3d35ee5b 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -1822,7 +1822,7 @@ mod benches {
 		[polkadot_runtime_parachains::initializer, Initializer]
 		[polkadot_runtime_parachains::paras_inherent, ParaInherent]
 		[polkadot_runtime_parachains::paras, Paras]
-		[polkadot_runtime_parachains::assigner_on_demand, OnDemandAssignmentProvider]
+		[polkadot_runtime_parachains::on_demand, OnDemandAssignmentProvider]
 		// Substrate
 		[pallet_balances, Balances]
 		[pallet_balances, NisCounterpartBalances]
-- 
GitLab


From 569ce71e2c759b26601608f145d9b5efcb906919 Mon Sep 17 00:00:00 2001
From: Ron <yrong1997@gmail.com>
Date: Mon, 20 Jan 2025 22:16:57 +0800
Subject: [PATCH 084/116] Migrate pallet-mmr to umbrella crate (#7081)

Part of https://github.com/paritytech/polkadot-sdk/issues/6504
---
 Cargo.lock                                    |  8 +-
 prdoc/pr_7081.prdoc                           | 14 +++
 .../frame/merkle-mountain-range/Cargo.toml    | 24 +-----
 .../merkle-mountain-range/src/benchmarking.rs | 10 ++-
 .../src/default_weights.rs                    |  9 +-
 .../frame/merkle-mountain-range/src/lib.rs    | 85 +++++++++----------
 .../merkle-mountain-range/src/mmr/mmr.rs      | 45 +++++-----
 .../merkle-mountain-range/src/mmr/mod.rs      |  5 +-
 .../merkle-mountain-range/src/mmr/storage.rs  | 38 +++++----
 .../frame/merkle-mountain-range/src/mock.rs   | 18 ++--
 .../frame/merkle-mountain-range/src/tests.rs  | 23 ++---
 11 files changed, 137 insertions(+), 142 deletions(-)
 create mode 100644 prdoc/pr_7081.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 0907830c5e7..50d36338cd2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14147,18 +14147,12 @@ dependencies = [
 name = "pallet-mmr"
 version = "27.0.0"
 dependencies = [
- "array-bytes",
- "frame-benchmarking 28.0.0",
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "itertools 0.11.0",
  "log",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "scale-info",
- "sp-core 28.0.0",
- "sp-io 30.0.0",
  "sp-mmr-primitives 26.0.0",
- "sp-runtime 31.0.1",
  "sp-tracing 16.0.0",
 ]
 
diff --git a/prdoc/pr_7081.prdoc b/prdoc/pr_7081.prdoc
new file mode 100644
index 00000000000..be1d8aa6ee0
--- /dev/null
+++ b/prdoc/pr_7081.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[pallet-mmr] Migrate to using frame umbrella crate'
+
+doc:
+  - audience: Runtime Dev
+    description: This PR migrates the pallet-mmr to use the frame umbrella crate. This
+      is part of the ongoing effort to migrate all pallets to use the frame umbrella crate.
+      The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504).
+
+crates:
+  - name: pallet-mmr
+    bump: minor
diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml
index 04f5ab64100..ecbef01a920 100644
--- a/substrate/frame/merkle-mountain-range/Cargo.toml
+++ b/substrate/frame/merkle-mountain-range/Cargo.toml
@@ -16,18 +16,12 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { workspace = true }
-frame-benchmarking = { optional = true, workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["runtime"] }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
-sp-core = { workspace = true }
-sp-io = { workspace = true }
 sp-mmr-primitives = { workspace = true }
-sp-runtime = { workspace = true }
 
 [dev-dependencies]
-array-bytes = { workspace = true, default-features = true }
 itertools = { workspace = true }
 sp-tracing = { workspace = true, default-features = true }
 
@@ -35,24 +29,14 @@ sp-tracing = { workspace = true, default-features = true }
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-benchmarking?/std",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"log/std",
 	"scale-info/std",
-	"sp-core/std",
-	"sp-io/std",
 	"sp-mmr-primitives/std",
-	"sp-runtime/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
+	"frame/runtime-benchmarks",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
-	"sp-runtime/try-runtime",
+	"frame/try-runtime",
 ]
diff --git a/substrate/frame/merkle-mountain-range/src/benchmarking.rs b/substrate/frame/merkle-mountain-range/src/benchmarking.rs
index 07afd9529eb..407f1f7ead6 100644
--- a/substrate/frame/merkle-mountain-range/src/benchmarking.rs
+++ b/substrate/frame/merkle-mountain-range/src/benchmarking.rs
@@ -20,8 +20,10 @@
 #![cfg(feature = "runtime-benchmarks")]
 
 use crate::*;
-use frame_benchmarking::v1::benchmarks_instance_pallet;
-use frame_support::traits::OnInitialize;
+use frame::{
+	benchmarking::prelude::v1::benchmarks_instance_pallet,
+	deps::frame_support::traits::OnInitialize,
+};
 
 benchmarks_instance_pallet! {
 	on_initialize {
@@ -31,10 +33,10 @@ benchmarks_instance_pallet! {
 
 		<<T as pallet::Config::<I>>::BenchmarkHelper as BenchmarkHelper>::setup();
 		for leaf in 0..(leaves - 1) {
-			Pallet::<T, I>::on_initialize((leaf as u32).into());
+			<Pallet::<T, I> as OnInitialize<BlockNumberFor<T>>>::on_initialize((leaf as u32).into());
 		}
 	}: {
-		Pallet::<T, I>::on_initialize((leaves as u32 - 1).into());
+		<Pallet::<T, I> as OnInitialize<BlockNumberFor<T>>>::on_initialize((leaves as u32 - 1).into());
 	} verify {
 		assert_eq!(crate::NumberOfLeaves::<T, I>::get(), leaves);
 	}
diff --git a/substrate/frame/merkle-mountain-range/src/default_weights.rs b/substrate/frame/merkle-mountain-range/src/default_weights.rs
index b0ef0539018..d1ed12edd06 100644
--- a/substrate/frame/merkle-mountain-range/src/default_weights.rs
+++ b/substrate/frame/merkle-mountain-range/src/default_weights.rs
@@ -18,16 +18,13 @@
 //! Default weights for the MMR Pallet
 //! This file was not auto-generated.
 
-use frame_support::weights::{
-	constants::{RocksDbWeight as DbWeight, WEIGHT_REF_TIME_PER_NANOS},
-	Weight,
-};
+use frame::{deps::frame_support::weights::constants::*, weights_prelude::*};
 
 impl crate::WeightInfo for () {
 	fn on_initialize(peaks: u32) -> Weight {
 		let peaks = u64::from(peaks);
 		// Reading the parent hash.
-		let leaf_weight = DbWeight::get().reads(1);
+		let leaf_weight = RocksDbWeight::get().reads(1);
 		// Blake2 hash cost.
 		let hash_weight = Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_NANOS, 0);
 		// No-op hook.
@@ -36,6 +33,6 @@ impl crate::WeightInfo for () {
 		leaf_weight
 			.saturating_add(hash_weight)
 			.saturating_add(hook_weight)
-			.saturating_add(DbWeight::get().reads_writes(2 + peaks, 2 + peaks))
+			.saturating_add(RocksDbWeight::get().reads_writes(2 + peaks, 2 + peaks))
 	}
 }
diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs
index 7dfe95c8336..76d6c2a1ac7 100644
--- a/substrate/frame/merkle-mountain-range/src/lib.rs
+++ b/substrate/frame/merkle-mountain-range/src/lib.rs
@@ -59,20 +59,17 @@
 extern crate alloc;
 
 use alloc::vec::Vec;
-use frame_support::weights::Weight;
-use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor};
 use log;
-use sp_mmr_primitives::utils;
-use sp_runtime::{
-	traits::{self, One, Saturating},
-	SaturatedConversion,
-};
 
-pub use pallet::*;
+use frame::prelude::*;
+
 pub use sp_mmr_primitives::{
-	self as primitives, utils::NodesUtils, Error, LeafDataProvider, LeafIndex, NodeIndex,
+	self as primitives, utils, utils::NodesUtils, AncestryProof, Error, FullLeaf, LeafDataProvider,
+	LeafIndex, LeafProof, NodeIndex, OnNewRoot,
 };
 
+pub use pallet::*;
+
 #[cfg(feature = "runtime-benchmarks")]
 mod benchmarking;
 mod default_weights;
@@ -90,11 +87,11 @@ mod tests;
 /// crate-local wrapper over [frame_system::Pallet]. Since the current block hash
 /// is not available (since the block is not finished yet),
 /// we use the `parent_hash` here along with parent block number.
-pub struct ParentNumberAndHash<T: frame_system::Config> {
-	_phantom: core::marker::PhantomData<T>,
+pub struct ParentNumberAndHash<T: Config> {
+	_phantom: PhantomData<T>,
 }
 
-impl<T: frame_system::Config> LeafDataProvider for ParentNumberAndHash<T> {
+impl<T: Config> LeafDataProvider for ParentNumberAndHash<T> {
 	type LeafData = (BlockNumberFor<T>, <T as frame_system::Config>::Hash);
 
 	fn leaf_data() -> Self::LeafData {
@@ -111,13 +108,11 @@ pub trait BlockHashProvider<BlockNumber, BlockHash> {
 }
 
 /// Default implementation of BlockHashProvider using frame_system.
-pub struct DefaultBlockHashProvider<T: frame_system::Config> {
+pub struct DefaultBlockHashProvider<T: Config> {
 	_phantom: core::marker::PhantomData<T>,
 }
 
-impl<T: frame_system::Config> BlockHashProvider<BlockNumberFor<T>, T::Hash>
-	for DefaultBlockHashProvider<T>
-{
+impl<T: Config> BlockHashProvider<BlockNumberFor<T>, T::Hash> for DefaultBlockHashProvider<T> {
 	fn block_hash(block_number: BlockNumberFor<T>) -> T::Hash {
 		frame_system::Pallet::<T>::block_hash(block_number)
 	}
@@ -142,17 +137,16 @@ impl BenchmarkHelper for () {
 type ModuleMmr<StorageType, T, I> = mmr::Mmr<StorageType, T, I, LeafOf<T, I>>;
 
 /// Leaf data.
-type LeafOf<T, I> = <<T as Config<I>>::LeafData as primitives::LeafDataProvider>::LeafData;
+type LeafOf<T, I> = <<T as Config<I>>::LeafData as LeafDataProvider>::LeafData;
 
 /// Hashing used for the pallet.
 pub(crate) type HashingOf<T, I> = <T as Config<I>>::Hashing;
 /// Hash type used for the pallet.
-pub(crate) type HashOf<T, I> = <<T as Config<I>>::Hashing as traits::Hash>::Output;
+pub(crate) type HashOf<T, I> = <<T as Config<I>>::Hashing as Hash>::Output;
 
-#[frame_support::pallet]
+#[frame::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
 
 	#[pallet::pallet]
 	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
@@ -180,7 +174,7 @@ pub mod pallet {
 		///
 		/// Then we create a tuple of these two hashes, SCALE-encode it (concatenate) and
 		/// hash, to obtain a new MMR inner node - the new peak.
-		type Hashing: traits::Hash;
+		type Hashing: Hash;
 
 		/// Data stored in the leaf nodes.
 		///
@@ -198,7 +192,7 @@ pub mod pallet {
 		/// two forks with identical line of ancestors compete to write the same offchain key, but
 		/// that's fine as long as leaves only contain data coming from ancestors - conflicting
 		/// writes are identical).
-		type LeafData: primitives::LeafDataProvider;
+		type LeafData: LeafDataProvider;
 
 		/// A hook to act on the new MMR root.
 		///
@@ -206,7 +200,7 @@ pub mod pallet {
 		/// apart from having it in the storage. For instance you might output it in the header
 		/// digest (see [`frame_system::Pallet::deposit_log`]) to make it available for Light
 		/// Clients. Hook complexity should be `O(1)`.
-		type OnNewRoot: primitives::OnNewRoot<HashOf<Self, I>>;
+		type OnNewRoot: OnNewRoot<HashOf<Self, I>>;
 
 		/// Block hash provider for a given block number.
 		type BlockHashProvider: BlockHashProvider<
@@ -248,9 +242,8 @@ pub mod pallet {
 	#[pallet::hooks]
 	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
 		fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
-			use primitives::LeafDataProvider;
 			let leaves = NumberOfLeaves::<T, I>::get();
-			let peaks_before = sp_mmr_primitives::utils::NodesUtils::new(leaves).number_of_peaks();
+			let peaks_before = NodesUtils::new(leaves).number_of_peaks();
 			let data = T::LeafData::leaf_data();
 
 			// append new leaf to MMR
@@ -268,12 +261,12 @@ pub mod pallet {
 					return T::WeightInfo::on_initialize(peaks_before as u32)
 				},
 			};
-			<T::OnNewRoot as primitives::OnNewRoot<_>>::on_new_root(&root);
+			<T::OnNewRoot as OnNewRoot<_>>::on_new_root(&root);
 
 			NumberOfLeaves::<T, I>::put(leaves);
 			RootHash::<T, I>::put(root);
 
-			let peaks_after = sp_mmr_primitives::utils::NodesUtils::new(leaves).number_of_peaks();
+			let peaks_after = NodesUtils::new(leaves).number_of_peaks();
 
 			T::WeightInfo::on_initialize(peaks_before.max(peaks_after) as u32)
 		}
@@ -290,28 +283,28 @@ pub mod pallet {
 pub fn verify_leaves_proof<H, L>(
 	root: H::Output,
 	leaves: Vec<mmr::Node<H, L>>,
-	proof: primitives::LeafProof<H::Output>,
-) -> Result<(), primitives::Error>
+	proof: LeafProof<H::Output>,
+) -> Result<(), Error>
 where
-	H: traits::Hash,
-	L: primitives::FullLeaf,
+	H: Hash,
+	L: FullLeaf,
 {
 	let is_valid = mmr::verify_leaves_proof::<H, L>(root, leaves, proof)?;
 	if is_valid {
 		Ok(())
 	} else {
-		Err(primitives::Error::Verify.log_debug(("The proof is incorrect.", root)))
+		Err(Error::Verify.log_debug(("The proof is incorrect.", root)))
 	}
 }
 
 /// Stateless ancestry proof verification.
 pub fn verify_ancestry_proof<H, L>(
 	root: H::Output,
-	ancestry_proof: primitives::AncestryProof<H::Output>,
+	ancestry_proof: AncestryProof<H::Output>,
 ) -> Result<H::Output, Error>
 where
-	H: traits::Hash,
-	L: primitives::FullLeaf,
+	H: Hash,
+	L: FullLeaf,
 {
 	mmr::verify_ancestry_proof::<H, L>(root, ancestry_proof)
 		.map_err(|_| Error::Verify.log_debug(("The ancestry proof is incorrect.", root)))
@@ -383,7 +376,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	pub fn generate_proof(
 		block_numbers: Vec<BlockNumberFor<T>>,
 		best_known_block_number: Option<BlockNumberFor<T>>,
-	) -> Result<(Vec<LeafOf<T, I>>, primitives::LeafProof<HashOf<T, I>>), primitives::Error> {
+	) -> Result<(Vec<LeafOf<T, I>>, LeafProof<HashOf<T, I>>), Error> {
 		// check whether best_known_block_number provided, else use current best block
 		let best_known_block_number =
 			best_known_block_number.unwrap_or_else(|| <frame_system::Pallet<T>>::block_number());
@@ -393,7 +386,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		// we need to translate the block_numbers into leaf indices.
 		let leaf_indices = block_numbers
 			.iter()
-			.map(|block_num| -> Result<LeafIndex, primitives::Error> {
+			.map(|block_num| -> Result<LeafIndex, Error> {
 				Self::block_num_to_leaf_index(*block_num)
 			})
 			.collect::<Result<Vec<LeafIndex>, _>>()?;
@@ -410,14 +403,15 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	/// or the proof is invalid.
 	pub fn verify_leaves(
 		leaves: Vec<LeafOf<T, I>>,
-		proof: primitives::LeafProof<HashOf<T, I>>,
-	) -> Result<(), primitives::Error> {
+		proof: LeafProof<HashOf<T, I>>,
+	) -> Result<(), Error> {
 		if proof.leaf_count > NumberOfLeaves::<T, I>::get() ||
 			proof.leaf_count == 0 ||
 			proof.items.len().saturating_add(leaves.len()) as u64 > proof.leaf_count
 		{
-			return Err(primitives::Error::Verify
-				.log_debug("The proof has incorrect number of leaves or proof items."))
+			return Err(
+				Error::Verify.log_debug("The proof has incorrect number of leaves or proof items.")
+			)
 		}
 
 		let mmr: ModuleMmr<mmr::storage::OffchainStorage, T, I> = mmr::Mmr::new(proof.leaf_count);
@@ -425,14 +419,14 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		if is_valid {
 			Ok(())
 		} else {
-			Err(primitives::Error::Verify.log_debug("The proof is incorrect."))
+			Err(Error::Verify.log_debug("The proof is incorrect."))
 		}
 	}
 
 	pub fn generate_ancestry_proof(
 		prev_block_number: BlockNumberFor<T>,
 		best_known_block_number: Option<BlockNumberFor<T>>,
-	) -> Result<primitives::AncestryProof<HashOf<T, I>>, Error> {
+	) -> Result<AncestryProof<HashOf<T, I>>, Error> {
 		// check whether best_known_block_number provided, else use current best block
 		let best_known_block_number =
 			best_known_block_number.unwrap_or_else(|| <frame_system::Pallet<T>>::block_number());
@@ -445,8 +439,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	}
 
 	#[cfg(feature = "runtime-benchmarks")]
-	pub fn generate_mock_ancestry_proof() -> Result<primitives::AncestryProof<HashOf<T, I>>, Error>
-	{
+	pub fn generate_mock_ancestry_proof() -> Result<AncestryProof<HashOf<T, I>>, Error> {
 		let leaf_count = Self::block_num_to_leaf_count(<frame_system::Pallet<T>>::block_number())?;
 		let mmr: ModuleMmr<mmr::storage::OffchainStorage, T, I> = mmr::Mmr::new(leaf_count);
 		mmr.generate_mock_ancestry_proof()
@@ -454,7 +447,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 
 	pub fn verify_ancestry_proof(
 		root: HashOf<T, I>,
-		ancestry_proof: primitives::AncestryProof<HashOf<T, I>>,
+		ancestry_proof: AncestryProof<HashOf<T, I>>,
 	) -> Result<HashOf<T, I>, Error> {
 		verify_ancestry_proof::<HashingOf<T, I>, LeafOf<T, I>>(root, ancestry_proof)
 	}
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
index f9a4580b9bb..a9818ba4710 100644
--- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
+++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
@@ -20,11 +20,14 @@ use crate::{
 		storage::{OffchainStorage, RuntimeStorage, Storage},
 		Hasher, Node, NodeOf,
 	},
-	primitives::{self, Error, NodeIndex},
+	primitives::{
+		mmr_lib, mmr_lib::MMRStoreReadOps, utils::NodesUtils, AncestryProof, Error, FullLeaf,
+		LeafIndex, LeafProof, NodeIndex,
+	},
 	Config, HashOf, HashingOf,
 };
 use alloc::vec::Vec;
-use sp_mmr_primitives::{mmr_lib, mmr_lib::MMRStoreReadOps, utils::NodesUtils, LeafIndex};
+use frame::prelude::*;
 
 /// Stateless verification of the proof for a batch of leaves.
 /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the
@@ -33,11 +36,11 @@ use sp_mmr_primitives::{mmr_lib, mmr_lib::MMRStoreReadOps, utils::NodesUtils, Le
 pub fn verify_leaves_proof<H, L>(
 	root: H::Output,
 	leaves: Vec<Node<H, L>>,
-	proof: primitives::LeafProof<H::Output>,
+	proof: LeafProof<H::Output>,
 ) -> Result<bool, Error>
 where
-	H: sp_runtime::traits::Hash,
-	L: primitives::FullLeaf,
+	H: Hash,
+	L: FullLeaf,
 {
 	let size = NodesUtils::new(proof.leaf_count).size();
 
@@ -62,11 +65,11 @@ where
 
 pub fn verify_ancestry_proof<H, L>(
 	root: H::Output,
-	ancestry_proof: primitives::AncestryProof<H::Output>,
+	ancestry_proof: AncestryProof<H::Output>,
 ) -> Result<H::Output, Error>
 where
-	H: sp_runtime::traits::Hash,
-	L: primitives::FullLeaf,
+	H: Hash,
+	L: FullLeaf,
 {
 	let mmr_size = NodesUtils::new(ancestry_proof.leaf_count).size();
 
@@ -104,7 +107,7 @@ pub struct Mmr<StorageType, T, I, L>
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf,
+	L: FullLeaf,
 	Storage<StorageType, T, I, L>:
 		MMRStoreReadOps<NodeOf<T, I, L>> + mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>>,
 {
@@ -116,7 +119,7 @@ impl<StorageType, T, I, L> Mmr<StorageType, T, I, L>
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf,
+	L: FullLeaf,
 	Storage<StorageType, T, I, L>:
 		MMRStoreReadOps<NodeOf<T, I, L>> + mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>>,
 {
@@ -133,7 +136,7 @@ where
 	pub fn verify_leaves_proof(
 		&self,
 		leaves: Vec<L>,
-		proof: primitives::LeafProof<HashOf<T, I>>,
+		proof: LeafProof<HashOf<T, I>>,
 	) -> Result<bool, Error> {
 		let p = mmr_lib::MerkleProof::<NodeOf<T, I, L>, Hasher<HashingOf<T, I>, L>>::new(
 			self.mmr.mmr_size(),
@@ -167,7 +170,7 @@ impl<T, I, L> Mmr<RuntimeStorage, T, I, L>
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf,
+	L: FullLeaf,
 {
 	/// Push another item to the MMR.
 	///
@@ -195,7 +198,7 @@ impl<T, I, L> Mmr<OffchainStorage, T, I, L>
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf + codec::Decode,
+	L: FullLeaf + codec::Decode,
 {
 	/// Generate a proof for given leaf indices.
 	///
@@ -204,7 +207,7 @@ where
 	pub fn generate_proof(
 		&self,
 		leaf_indices: Vec<NodeIndex>,
-	) -> Result<(Vec<L>, primitives::LeafProof<HashOf<T, I>>), Error> {
+	) -> Result<(Vec<L>, LeafProof<HashOf<T, I>>), Error> {
 		let positions = leaf_indices
 			.iter()
 			.map(|index| mmr_lib::leaf_index_to_pos(*index))
@@ -222,7 +225,7 @@ where
 		self.mmr
 			.gen_proof(positions)
 			.map_err(|e| Error::GenerateProof.log_error(e))
-			.map(|p| primitives::LeafProof {
+			.map(|p| LeafProof {
 				leaf_indices,
 				leaf_count,
 				items: p.proof_items().iter().map(|x| x.hash()).collect(),
@@ -233,14 +236,14 @@ where
 	pub fn generate_ancestry_proof(
 		&self,
 		prev_leaf_count: LeafIndex,
-	) -> Result<primitives::AncestryProof<HashOf<T, I>>, Error> {
+	) -> Result<AncestryProof<HashOf<T, I>>, Error> {
 		let prev_mmr_size = NodesUtils::new(prev_leaf_count).size();
 		let raw_ancestry_proof = self
 			.mmr
 			.gen_ancestry_proof(prev_mmr_size)
 			.map_err(|e| Error::GenerateProof.log_error(e))?;
 
-		Ok(primitives::AncestryProof {
+		Ok(AncestryProof {
 			prev_peaks: raw_ancestry_proof.prev_peaks.into_iter().map(|p| p.hash()).collect(),
 			prev_leaf_count,
 			leaf_count: self.leaves,
@@ -258,12 +261,10 @@ where
 	/// The generated proof contains all the leafs in the MMR, so this way we can generate a proof
 	/// with exactly `leaf_count` items.
 	#[cfg(feature = "runtime-benchmarks")]
-	pub fn generate_mock_ancestry_proof(
-		&self,
-	) -> Result<sp_mmr_primitives::AncestryProof<HashOf<T, I>>, Error> {
+	pub fn generate_mock_ancestry_proof(&self) -> Result<AncestryProof<HashOf<T, I>>, Error> {
 		use crate::ModuleMmr;
 		use alloc::vec;
-		use sp_mmr_primitives::mmr_lib::helper;
+		use mmr_lib::helper;
 
 		let mmr: ModuleMmr<OffchainStorage, T, I> = Mmr::new(self.leaves);
 		let store = <Storage<OffchainStorage, T, I, L>>::default();
@@ -289,7 +290,7 @@ where
 			proof_items.push((leaf_pos, leaf));
 		}
 
-		Ok(sp_mmr_primitives::AncestryProof {
+		Ok(AncestryProof {
 			prev_peaks,
 			prev_leaf_count: self.leaves,
 			leaf_count: self.leaves,
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
index 5b73f53506e..85d00f8a65d 100644
--- a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
+++ b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
@@ -18,10 +18,9 @@
 mod mmr;
 pub mod storage;
 
-use sp_mmr_primitives::{mmr_lib, DataOrHash, FullLeaf};
-use sp_runtime::traits;
-
 pub use self::mmr::{verify_ancestry_proof, verify_leaves_proof, Mmr};
+use crate::primitives::{mmr_lib, DataOrHash, FullLeaf};
+use frame::traits;
 
 /// Node type for runtime `T`.
 pub type NodeOf<T, I, L> = Node<<T as crate::Config<I>>::Hashing, L>;
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs
index 02852388b41..c201c0ea846 100644
--- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs
+++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs
@@ -17,18 +17,22 @@
 
 //! An MMR storage implementation.
 
-use alloc::{vec, vec::Vec};
-use codec::Encode;
-use core::iter::Peekable;
-use log::{debug, trace};
-use sp_core::offchain::StorageKind;
-use sp_mmr_primitives::{mmr_lib, mmr_lib::helper, utils::NodesUtils};
-
 use crate::{
 	mmr::{Node, NodeOf},
-	primitives::{self, NodeIndex},
+	primitives::{mmr_lib, mmr_lib::helper, utils::NodesUtils, FullLeaf, NodeIndex},
 	BlockHashProvider, Config, Nodes, NumberOfLeaves, Pallet,
 };
+use alloc::{vec, vec::Vec};
+use codec::Encode;
+use core::iter::Peekable;
+use frame::{
+	deps::{
+		sp_core::offchain::StorageKind,
+		sp_io::{offchain, offchain_index},
+	},
+	prelude::*,
+};
+use log::{debug, trace};
 
 /// A marker type for runtime-specific storage implementation.
 ///
@@ -48,20 +52,20 @@ pub struct OffchainStorage;
 
 impl OffchainStorage {
 	fn get(key: &[u8]) -> Option<Vec<u8>> {
-		sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key)
+		offchain::local_storage_get(StorageKind::PERSISTENT, &key)
 	}
 
 	#[cfg(not(feature = "runtime-benchmarks"))]
 	fn set<T: Config<I>, I: 'static>(key: &[u8], value: &[u8]) {
-		sp_io::offchain_index::set(key, value);
+		offchain_index::set(key, value);
 	}
 
 	#[cfg(feature = "runtime-benchmarks")]
 	fn set<T: Config<I>, I: 'static>(key: &[u8], value: &[u8]) {
 		if crate::pallet::UseLocalStorage::<T, I>::get() {
-			sp_io::offchain::local_storage_set(StorageKind::PERSISTENT, key, value);
+			offchain::local_storage_set(StorageKind::PERSISTENT, key, value);
 		} else {
-			sp_io::offchain_index::set(key, value);
+			offchain_index::set(key, value);
 		}
 	}
 }
@@ -82,7 +86,7 @@ impl<T, I, L> mmr_lib::MMRStoreReadOps<NodeOf<T, I, L>> for Storage<OffchainStor
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf + codec::Decode,
+	L: FullLeaf + Decode,
 {
 	fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result<Option<NodeOf<T, I, L>>> {
 		// Find out which leaf added node `pos` in the MMR.
@@ -120,7 +124,7 @@ impl<T, I, L> mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>> for Storage<OffchainSto
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf + codec::Decode,
+	L: FullLeaf + Decode,
 {
 	fn append(&mut self, _: NodeIndex, _: Vec<NodeOf<T, I, L>>) -> mmr_lib::Result<()> {
 		panic!("MMR must not be altered in the off-chain context.")
@@ -131,7 +135,7 @@ impl<T, I, L> mmr_lib::MMRStoreReadOps<NodeOf<T, I, L>> for Storage<RuntimeStora
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf,
+	L: FullLeaf,
 {
 	fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result<Option<NodeOf<T, I, L>>> {
 		Ok(Nodes::<T, I>::get(pos).map(Node::Hash))
@@ -142,7 +146,7 @@ impl<T, I, L> mmr_lib::MMRStoreWriteOps<NodeOf<T, I, L>> for Storage<RuntimeStor
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf,
+	L: FullLeaf,
 {
 	fn append(&mut self, pos: NodeIndex, elems: Vec<NodeOf<T, I, L>>) -> mmr_lib::Result<()> {
 		if elems.is_empty() {
@@ -205,7 +209,7 @@ impl<T, I, L> Storage<RuntimeStorage, T, I, L>
 where
 	T: Config<I>,
 	I: 'static,
-	L: primitives::FullLeaf,
+	L: FullLeaf,
 {
 	fn store_to_offchain(
 		pos: NodeIndex,
diff --git a/substrate/frame/merkle-mountain-range/src/mock.rs b/substrate/frame/merkle-mountain-range/src/mock.rs
index 606719c6deb..4c234e0d94a 100644
--- a/substrate/frame/merkle-mountain-range/src/mock.rs
+++ b/substrate/frame/merkle-mountain-range/src/mock.rs
@@ -18,14 +18,20 @@
 use crate as pallet_mmr;
 use crate::*;
 
+use crate::{
+	frame_system::DefaultConfig,
+	primitives::{Compact, LeafDataProvider},
+};
 use codec::{Decode, Encode};
-use frame_support::{derive_impl, parameter_types};
-use sp_mmr_primitives::{Compact, LeafDataProvider};
-use sp_runtime::traits::Keccak256;
+use frame::{
+	deps::frame_support::derive_impl,
+	prelude::{frame_system, frame_system::config_preludes::TestDefaultConfig},
+	testing_prelude::*,
+};
 
-type Block = frame_system::mocking::MockBlock<Test>;
+type Block = MockBlock<Test>;
 
-frame_support::construct_runtime!(
+construct_runtime!(
 	pub enum Test
 	{
 		System: frame_system,
@@ -33,7 +39,7 @@ frame_support::construct_runtime!(
 	}
 );
 
-#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+#[derive_impl(TestDefaultConfig)]
 impl frame_system::Config for Test {
 	type Block = Block;
 }
diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs
index 93e3d06eaa0..ae0c58e91ab 100644
--- a/substrate/frame/merkle-mountain-range/src/tests.rs
+++ b/substrate/frame/merkle-mountain-range/src/tests.rs
@@ -17,19 +17,21 @@
 
 use crate::{mock::*, *};
 
-use frame_support::traits::{Get, OnInitialize};
-use sp_core::{
-	offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt},
-	H256,
+use crate::primitives::{mmr_lib::helper, utils, Compact, LeafProof};
+
+use frame::{
+	deps::sp_core::{
+		offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt},
+		H256,
+	},
+	testing_prelude::*,
 };
-use sp_mmr_primitives::{mmr_lib::helper, utils, Compact, LeafProof};
-use sp_runtime::BuildStorage;
 
-pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
+pub(crate) fn new_test_ext() -> TestState {
 	frame_system::GenesisConfig::<Test>::default().build_storage().unwrap().into()
 }
 
-fn register_offchain_ext(ext: &mut sp_io::TestExternalities) {
+fn register_offchain_ext(ext: &mut TestState) {
 	let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db());
 	ext.register_extension(OffchainDbExt::new(offchain.clone()));
 	ext.register_extension(OffchainWorkerExt::new(offchain));
@@ -54,7 +56,7 @@ pub(crate) fn hex(s: &str) -> H256 {
 	s.parse().unwrap()
 }
 
-type BlockNumber = frame_system::pallet_prelude::BlockNumberFor<Test>;
+type BlockNumber = BlockNumberFor<Test>;
 
 fn decode_node(
 	v: Vec<u8>,
@@ -517,7 +519,7 @@ fn should_verify() {
 }
 
 fn generate_and_verify_batch_proof(
-	ext: &mut sp_io::TestExternalities,
+	ext: &mut TestExternalities,
 	block_numbers: &Vec<u64>,
 	blocks_to_add: usize,
 ) {
@@ -719,7 +721,6 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() {
 
 #[test]
 fn should_verify_canonicalized() {
-	use frame_support::traits::Hooks;
 	sp_tracing::init_for_tests();
 
 	// How deep is our fork-aware storage (in terms of blocks/leaves, nodes will be more).
-- 
GitLab


From 711e6ff33373bc08b026446ce19b73920bfe068c Mon Sep 17 00:00:00 2001
From: runcomet <runcomet@protonmail.com>
Date: Mon, 20 Jan 2025 08:12:44 -0800
Subject: [PATCH 085/116] Migrate `pallet-assets-freezer` to umbrella crate
 (#6599)

Part of https://github.com/paritytech/polkadot-sdk/issues/6504

### Added modules

- `utility`: Traits not tied to any direct operation in the runtime.

polkadot address: 14SRqZTC1d8rfxL8W1tBTnfUBPU23ACFVPzp61FyGf4ftUFg

---------

Co-authored-by: Giuseppe Re <giuseppe.re@parity.io>
---
 Cargo.lock                                  |  7 +---
 substrate/frame/assets-freezer/Cargo.toml   | 23 ++---------
 substrate/frame/assets-freezer/src/impls.rs | 12 ++----
 substrate/frame/assets-freezer/src/lib.rs   | 43 +++++++++++----------
 substrate/frame/assets-freezer/src/mock.rs  | 23 ++++-------
 substrate/frame/assets-freezer/src/tests.rs | 16 ++------
 substrate/frame/src/lib.rs                  | 29 ++++++++++----
 7 files changed, 64 insertions(+), 89 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 50d36338cd2..397d0c7fe82 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -12175,17 +12175,12 @@ dependencies = [
 name = "pallet-assets-freezer"
 version = "0.1.0"
 dependencies = [
- "frame-benchmarking 28.0.0",
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "log",
  "pallet-assets 29.1.0",
  "pallet-balances 28.0.0",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "scale-info",
- "sp-core 28.0.0",
- "sp-io 30.0.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]
diff --git a/substrate/frame/assets-freezer/Cargo.toml b/substrate/frame/assets-freezer/Cargo.toml
index 3fffa4d0627..d8c0ee6e442 100644
--- a/substrate/frame/assets-freezer/Cargo.toml
+++ b/substrate/frame/assets-freezer/Cargo.toml
@@ -16,46 +16,31 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { workspace = true }
-frame-benchmarking = { optional = true, workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["runtime"] }
 log = { workspace = true }
 pallet-assets = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
-sp-runtime = { workspace = true }
 
 [dev-dependencies]
 pallet-balances = { workspace = true }
-sp-core = { workspace = true }
-sp-io = { workspace = true }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-benchmarking?/std",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"log/std",
 	"pallet-assets/std",
 	"pallet-balances/std",
 	"scale-info/std",
-	"sp-core/std",
-	"sp-io/std",
-	"sp-runtime/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking/runtime-benchmarks",
-	"frame-support/runtime-benchmarks",
-	"frame-system/runtime-benchmarks",
+	"frame/runtime-benchmarks",
 	"pallet-assets/runtime-benchmarks",
 	"pallet-balances/runtime-benchmarks",
-	"sp-runtime/runtime-benchmarks",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
+	"frame/try-runtime",
 	"pallet-assets/try-runtime",
 	"pallet-balances/try-runtime",
-	"sp-runtime/try-runtime",
 ]
diff --git a/substrate/frame/assets-freezer/src/impls.rs b/substrate/frame/assets-freezer/src/impls.rs
index cd383f1c3cd..8c9f148e1e9 100644
--- a/substrate/frame/assets-freezer/src/impls.rs
+++ b/substrate/frame/assets-freezer/src/impls.rs
@@ -16,13 +16,7 @@
 // limitations under the License.
 
 use super::*;
-
-use frame_support::traits::{
-	fungibles::{Inspect, InspectFreeze, MutateFreeze},
-	tokens::{DepositConsequence, Fortitude, Preservation, Provenance, WithdrawConsequence},
-};
 use pallet_assets::FrozenBalance;
-use sp_runtime::traits::Zero;
 
 // Implements [`FrozenBalance`] from [`pallet-assets`], so it can understand how much of an
 // account balance is frozen, and is able to signal to this pallet when to clear the state of an
@@ -115,7 +109,7 @@ impl<T: Config<I>, I: 'static> MutateFreeze<T::AccountId> for Pallet<T, I> {
 		id: &Self::Id,
 		who: &T::AccountId,
 		amount: Self::Balance,
-	) -> sp_runtime::DispatchResult {
+	) -> DispatchResult {
 		if amount.is_zero() {
 			return Self::thaw(asset, id, who);
 		}
@@ -135,7 +129,7 @@ impl<T: Config<I>, I: 'static> MutateFreeze<T::AccountId> for Pallet<T, I> {
 		id: &Self::Id,
 		who: &T::AccountId,
 		amount: Self::Balance,
-	) -> sp_runtime::DispatchResult {
+	) -> DispatchResult {
 		if amount.is_zero() {
 			return Ok(());
 		}
@@ -150,7 +144,7 @@ impl<T: Config<I>, I: 'static> MutateFreeze<T::AccountId> for Pallet<T, I> {
 		Self::update_freezes(asset, who, freezes.as_bounded_slice())
 	}
 
-	fn thaw(asset: Self::AssetId, id: &Self::Id, who: &T::AccountId) -> sp_runtime::DispatchResult {
+	fn thaw(asset: Self::AssetId, id: &Self::Id, who: &T::AccountId) -> DispatchResult {
 		let mut freezes = Freezes::<T, I>::get(asset.clone(), who);
 		freezes.retain(|f| &f.id != id);
 		Self::update_freezes(asset, who, freezes.as_bounded_slice())
diff --git a/substrate/frame/assets-freezer/src/lib.rs b/substrate/frame/assets-freezer/src/lib.rs
index b42d41ac1d9..5f718ed8482 100644
--- a/substrate/frame/assets-freezer/src/lib.rs
+++ b/substrate/frame/assets-freezer/src/lib.rs
@@ -18,10 +18,10 @@
 //! # Assets Freezer Pallet
 //!
 //! A pallet capable of freezing fungibles from `pallet-assets`. This is an extension of
-//! `pallet-assets`, wrapping [`fungibles::Inspect`](`frame_support::traits::fungibles::Inspect`).
+//! `pallet-assets`, wrapping [`fungibles::Inspect`](`Inspect`).
 //! It implements both
-//! [`fungibles::freeze::Inspect`](frame_support::traits::fungibles::freeze::Inspect) and
-//! [`fungibles::freeze::Mutate`](frame_support::traits::fungibles::freeze::Mutate). The complexity
+//! [`fungibles::freeze::Inspect`](InspectFreeze) and
+//! [`fungibles::freeze::Mutate`](MutateFreeze). The complexity
 //! of the operations is `O(n)`. where `n` is the variant count of `RuntimeFreezeReason`.
 //!
 //! ## Pallet API
@@ -35,26 +35,27 @@
 //!
 //! - Pallet hooks allowing [`pallet-assets`] to know the frozen balance for an account on a given
 //!   asset (see [`pallet_assets::FrozenBalance`]).
-//! - An implementation of
-//!   [`fungibles::freeze::Inspect`](frame_support::traits::fungibles::freeze::Inspect) and
-//!   [`fungibles::freeze::Mutate`](frame_support::traits::fungibles::freeze::Mutate), allowing
-//!   other pallets to manage freezes for the `pallet-assets` assets.
+//! - An implementation of [`fungibles::freeze::Inspect`](InspectFreeze) and
+//!   [`fungibles::freeze::Mutate`](MutateFreeze), allowing other pallets to manage freezes for the
+//!   `pallet-assets` assets.
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use frame_support::{
-	pallet_prelude::*,
-	traits::{tokens::IdAmount, VariantCount, VariantCountOf},
-	BoundedVec,
-};
-use frame_system::pallet_prelude::BlockNumberFor;
-use sp_runtime::{
-	traits::{Saturating, Zero},
-	BoundedSlice,
+use frame::{
+	prelude::*,
+	traits::{
+		fungibles::{Inspect, InspectFreeze, MutateFreeze},
+		tokens::{
+			DepositConsequence, Fortitude, IdAmount, Preservation, Provenance, WithdrawConsequence,
+		},
+	},
 };
 
 pub use pallet::*;
 
+#[cfg(feature = "try-runtime")]
+use frame::try_runtime::TryRuntimeError;
+
 #[cfg(test)]
 mod mock;
 #[cfg(test)]
@@ -62,7 +63,7 @@ mod tests;
 
 mod impls;
 
-#[frame_support::pallet]
+#[frame::pallet]
 pub mod pallet {
 	use super::*;
 
@@ -125,7 +126,7 @@ pub mod pallet {
 	#[pallet::hooks]
 	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
 		#[cfg(feature = "try-runtime")]
-		fn try_state(_: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+		fn try_state(_: BlockNumberFor<T>) -> Result<(), TryRuntimeError> {
 			Self::do_try_state()
 		}
 	}
@@ -159,13 +160,13 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		Ok(())
 	}
 
-	#[cfg(any(test, feature = "try-runtime"))]
-	fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> {
+	#[cfg(feature = "try-runtime")]
+	fn do_try_state() -> Result<(), TryRuntimeError> {
 		for (asset, who, _) in FrozenBalances::<T, I>::iter() {
 			let max_frozen_amount =
 				Freezes::<T, I>::get(asset.clone(), who.clone()).iter().map(|l| l.amount).max();
 
-			frame_support::ensure!(
+			ensure!(
 				FrozenBalances::<T, I>::get(asset, who) == max_frozen_amount,
 				"The `FrozenAmount` is not equal to the maximum amount in `Freezes` for (`asset`, `who`)"
 			);
diff --git a/substrate/frame/assets-freezer/src/mock.rs b/substrate/frame/assets-freezer/src/mock.rs
index bc903a018f7..ad08787aba2 100644
--- a/substrate/frame/assets-freezer/src/mock.rs
+++ b/substrate/frame/assets-freezer/src/mock.rs
@@ -20,23 +20,15 @@
 use crate as pallet_assets_freezer;
 pub use crate::*;
 use codec::{Compact, Decode, Encode, MaxEncodedLen};
-use frame_support::{
-	derive_impl,
-	traits::{AsEnsureOriginWithArg, ConstU64},
-};
+use frame::testing_prelude::*;
 use scale_info::TypeInfo;
-use sp_core::{ConstU32, H256};
-use sp_runtime::{
-	traits::{BlakeTwo256, IdentityLookup},
-	BuildStorage,
-};
 
 pub type AccountId = u64;
 pub type Balance = u64;
 pub type AssetId = u32;
 type Block = frame_system::mocking::MockBlock<Test>;
 
-frame_support::construct_runtime!(
+construct_runtime!(
 	pub enum Test
 	{
 		System: frame_system,
@@ -48,7 +40,7 @@ frame_support::construct_runtime!(
 
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Test {
-	type BaseCallFilter = frame_support::traits::Everything;
+	type BaseCallFilter = Everything;
 	type BlockWeights = ();
 	type BlockLength = ();
 	type DbWeight = ();
@@ -70,7 +62,7 @@ impl frame_system::Config for Test {
 	type SystemWeightInfo = ();
 	type SS58Prefix = ();
 	type OnSetCode = ();
-	type MaxConsumers = frame_support::traits::ConstU32<16>;
+	type MaxConsumers = ConstU32<16>;
 }
 
 impl pallet_balances::Config for Test {
@@ -132,7 +124,7 @@ impl Config for Test {
 	type RuntimeEvent = RuntimeEvent;
 }
 
-pub fn new_test_ext(execute: impl FnOnce()) -> sp_io::TestExternalities {
+pub fn new_test_ext(execute: impl FnOnce()) -> TestExternalities {
 	let t = RuntimeGenesisConfig {
 		assets: pallet_assets::GenesisConfig {
 			assets: vec![(1, 0, true, 1)],
@@ -145,11 +137,12 @@ pub fn new_test_ext(execute: impl FnOnce()) -> sp_io::TestExternalities {
 	}
 	.build_storage()
 	.unwrap();
-	let mut ext: sp_io::TestExternalities = t.into();
+	let mut ext: TestExternalities = t.into();
 	ext.execute_with(|| {
 		System::set_block_number(1);
 		execute();
-		frame_support::assert_ok!(AssetsFreezer::do_try_state());
+		#[cfg(feature = "try-runtime")]
+		assert_ok!(AssetsFreezer::do_try_state());
 	});
 
 	ext
diff --git a/substrate/frame/assets-freezer/src/tests.rs b/substrate/frame/assets-freezer/src/tests.rs
index 4f2dea79c70..b890dc98b57 100644
--- a/substrate/frame/assets-freezer/src/tests.rs
+++ b/substrate/frame/assets-freezer/src/tests.rs
@@ -17,22 +17,16 @@
 
 //! Tests for pallet-assets-freezer.
 
-use crate::mock::*;
+use crate::mock::{self, *};
 
 use codec::Compact;
-use frame_support::{
-	assert_ok, assert_storage_noop,
-	traits::{
-		fungibles::{Inspect, InspectFreeze, MutateFreeze},
-		tokens::{Fortitude, Preservation},
-	},
-};
+use frame::testing_prelude::*;
 use pallet_assets::FrozenBalance;
 
 const WHO: AccountId = 1;
-const ASSET_ID: AssetId = 1;
+const ASSET_ID: mock::AssetId = 1;
 
-fn test_set_freeze(id: DummyFreezeReason, amount: Balance) {
+fn test_set_freeze(id: DummyFreezeReason, amount: mock::Balance) {
 	let mut freezes = Freezes::<Test>::get(ASSET_ID, WHO);
 
 	if let Some(i) = freezes.iter_mut().find(|l| l.id == id) {
@@ -281,8 +275,6 @@ mod impl_mutate_freeze {
 }
 
 mod with_pallet_assets {
-	use frame_support::assert_noop;
-
 	use super::*;
 
 	#[test]
diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs
index 18c7bd12394..1c4b2ed5b82 100644
--- a/substrate/frame/src/lib.rs
+++ b/substrate/frame/src/lib.rs
@@ -202,12 +202,10 @@ pub mod prelude {
 	/// Dispatch types from `frame-support`, other fundamental traits
 	#[doc(no_inline)]
 	pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo};
-	pub use frame_support::{
-		defensive, defensive_assert,
-		traits::{
-			Contains, EitherOf, EstimateNextSessionRotation, IsSubType, MapSuccess, NoOpPoll,
-			OnRuntimeUpgrade, OneSessionHandler, RankedMembers, RankedMembersSwapHandler,
-		},
+	pub use frame_support::traits::{
+		Contains, EitherOf, EstimateNextSessionRotation, Everything, IsSubType, MapSuccess,
+		NoOpPoll, OnRuntimeUpgrade, OneSessionHandler, RankedMembers, RankedMembersSwapHandler,
+		VariantCount, VariantCountOf,
 	};
 
 	/// Pallet prelude of `frame-system`.
@@ -225,6 +223,9 @@ pub mod prelude {
 	/// All hashing related things
 	pub use super::hashing::*;
 
+	/// All account related things.
+	pub use super::account::*;
+
 	/// All arithmetic types and traits used for safe math.
 	pub use super::arithmetic::*;
 
@@ -234,6 +235,10 @@ pub mod prelude {
 		BlockNumberProvider, Bounded, Convert, DispatchInfoOf, Dispatchable, ReduceBy,
 		ReplaceWithDefault, SaturatedConversion, Saturating, StaticLookup, TrailingZeroInput,
 	};
+
+	/// Bounded storage related types.
+	pub use sp_runtime::{BoundedSlice, BoundedVec};
+
 	/// Other error/result types for runtime
 	#[doc(no_inline)]
 	pub use sp_runtime::{
@@ -321,7 +326,7 @@ pub mod testing_prelude {
 	/// Other helper macros from `frame_support` that help with asserting in tests.
 	pub use frame_support::{
 		assert_err, assert_err_ignore_postinfo, assert_error_encoded_size, assert_noop, assert_ok,
-		assert_storage_noop, hypothetically, storage_alias,
+		assert_storage_noop, ensure, hypothetically, storage_alias,
 	};
 
 	pub use frame_system::{self, mocking::*, RunToBlockHooks};
@@ -551,6 +556,16 @@ pub mod hashing {
 	pub use sp_runtime::traits::{BlakeTwo256, Hash, Keccak256};
 }
 
+/// All account management related traits.
+///
+/// This is already part of the [`prelude`].
+pub mod account {
+	pub use frame_support::traits::{
+		AsEnsureOriginWithArg, ChangeMembers, EitherOfDiverse, InitializeMembers,
+	};
+	pub use sp_runtime::traits::{IdentifyAccount, IdentityLookup};
+}
+
 /// Access to all of the dependencies of this crate. In case the prelude re-exports are not enough,
 /// this module can be used.
 ///
-- 
GitLab


From 2c4ceccebe2c338029eef243645455d525a5a78b Mon Sep 17 00:00:00 2001
From: Benjamin Gallois <benjamin@gallois.cc>
Date: Mon, 20 Jan 2025 22:19:48 +0100
Subject: [PATCH 086/116] Fix `frame-benchmarking-cli` not buildable without
 rocksdb (#7263)

## Description

The `frame-benchmarking-cli` crate has not been buildable without the
`rocksdb` feature since version 1.17.0.

**Error:**
```rust
self.database()?.unwrap_or(Database::RocksDb),
                             ^^^^^^^ variant or associated item not found in `Database`
```

This issue is also related to the `rocksdb` feature bleeding (#3793),
where the `rocksdb` feature was always activated even when compiling
this crate with `--no-default-features`.

**Fix:**
- Resolved the error by choosing `paritydb` as the default database when
compiled without the `rocksdb` feature.
- Fixed the issue where the `sc-cli` crate's `rocksdb` feature was
always active, even when compiling `frame-benchmarking-cli` with
`--no-default-features`.

## Review Notes

Fix the crate to be built without rocksdb, not intended to solve #3793.

---------

Co-authored-by: command-bot <>
---
 polkadot/node/metrics/Cargo.toml              |  2 +-
 prdoc/pr_7263.prdoc                           | 28 +++++++++++++++++++
 .../benchmarking-cli/src/overhead/command.rs  |  2 +-
 3 files changed, 30 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_7263.prdoc

diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml
index 454337cb63f..318deca4f24 100644
--- a/polkadot/node/metrics/Cargo.toml
+++ b/polkadot/node/metrics/Cargo.toml
@@ -18,7 +18,7 @@ gum = { workspace = true, default-features = true }
 
 metered = { features = ["futures_channel"], workspace = true }
 # Both `sc-service` and `sc-cli` are required by runtime metrics `logger_hook()`.
-sc-cli = { workspace = true, default-features = true }
+sc-cli = { workspace = true }
 sc-service = { workspace = true, default-features = true }
 
 bs58 = { features = ["alloc"], workspace = true, default-features = true }
diff --git a/prdoc/pr_7263.prdoc b/prdoc/pr_7263.prdoc
new file mode 100644
index 00000000000..892e8049395
--- /dev/null
+++ b/prdoc/pr_7263.prdoc
@@ -0,0 +1,28 @@
+title: Fix `frame-benchmarking-cli` not buildable without rocksdb
+doc:
+- audience: Runtime Dev
+  description: |-
+    ## Description
+
+    The `frame-benchmarking-cli`  crate has not been buildable without the `rocksdb` feature since version 1.17.0.
+
+    **Error:**
+    ```rust
+    self.database()?.unwrap_or(Database::RocksDb),
+                                 ^^^^^^^ variant or associated item not found in `Database`
+    ```
+
+    This issue is also related to the `rocksdb` feature bleeding (#3793), where the `rocksdb` feature was always activated even when compiling this crate with `--no-default-features`.
+
+    **Fix:**
+    - Resolved the error by choosing `paritydb` as the default database when compiled without the `rocksdb` feature.
+    - Fixed the issue where the `sc-cli` crate's `rocksdb` feature was always active, even when compiling `frame-benchmarking-cli` with `--no-default-features`.
+
+    ## Review Notes
+
+    Fix the crate to be built without rocksdb, not intended to solve #3793.
+crates:
+- name: polkadot-node-metrics
+  bump: patch
+- name: frame-benchmarking-cli
+  bump: patch
diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs
index 8df8ee5464f..847f8e16c0d 100644
--- a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs
@@ -482,7 +482,7 @@ impl OverheadCmd {
 		let database_source = self.database_config(
 			&base_path.path().to_path_buf(),
 			self.database_cache_size()?.unwrap_or(1024),
-			self.database()?.unwrap_or(Database::RocksDb),
+			self.database()?.unwrap_or(Database::Auto),
 		)?;
 
 		let backend = new_db_backend(DatabaseSettings {
-- 
GitLab


From cbf3925e1fe1383b998cfb428038c46da1577501 Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Mon, 20 Jan 2025 23:58:21 +0100
Subject: [PATCH 087/116] [eth-indexer] subscribe to finalize blocks instead of
 best blocks (#7260)

For eth-indexer, it's probably safer to use `subscribe_finalized` and
index these blocks into the DB rather than `subscribe_best`

---------

Co-authored-by: command-bot <>
---
 prdoc/pr_7260.prdoc                      | 10 ++++++
 substrate/frame/revive/rpc/src/client.rs | 45 +++++++++++++++---------
 2 files changed, 39 insertions(+), 16 deletions(-)
 create mode 100644 prdoc/pr_7260.prdoc

diff --git a/prdoc/pr_7260.prdoc b/prdoc/pr_7260.prdoc
new file mode 100644
index 00000000000..62f73120bc1
--- /dev/null
+++ b/prdoc/pr_7260.prdoc
@@ -0,0 +1,10 @@
+title: '[eth-indexer] subscribe to finalize blocks instead of best blocks'
+doc:
+- audience: Runtime Dev
+  description: 'For eth-indexer, it''s probably safer to use `subscribe_finalized`
+    and index these blocks into the DB rather than `subscribe_best`
+
+    '
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs
index a5a022f9722..7a72f8e26b0 100644
--- a/substrate/frame/revive/rpc/src/client.rs
+++ b/substrate/frame/revive/rpc/src/client.rs
@@ -68,6 +68,14 @@ pub type Shared<T> = Arc<RwLock<T>>;
 /// The runtime balance type.
 pub type Balance = u128;
 
+/// The subscription type used to listen to new blocks.
+pub enum SubscriptionType {
+	/// Subscribe to the best blocks.
+	BestBlocks,
+	/// Subscribe to the finalized blocks.
+	FinalizedBlocks,
+}
+
 /// Unwrap the original `jsonrpsee::core::client::Error::Call` error.
 fn unwrap_call_err(err: &subxt::error::RpcError) -> Option<ErrorObjectOwned> {
 	use subxt::backend::rpc::reconnecting_rpc_client;
@@ -278,19 +286,23 @@ impl Client {
 
 	/// Subscribe to new best blocks, and execute the async closure with
 	/// the extracted block and ethereum transactions
-	async fn subscribe_new_blocks<F, Fut>(&self, callback: F) -> Result<(), ClientError>
+	async fn subscribe_new_blocks<F, Fut>(
+		&self,
+		subscription_type: SubscriptionType,
+		callback: F,
+	) -> Result<(), ClientError>
 	where
 		F: Fn(SubstrateBlock) -> Fut + Send + Sync,
 		Fut: std::future::Future<Output = Result<(), ClientError>> + Send,
 	{
 		log::info!(target: LOG_TARGET, "Subscribing to new blocks");
-		let mut block_stream = match self.api.blocks().subscribe_best().await {
-			Ok(s) => s,
-			Err(err) => {
-				log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}");
-				return Err(err.into());
-			},
-		};
+		let mut block_stream = match subscription_type {
+			SubscriptionType::BestBlocks => self.api.blocks().subscribe_best().await,
+			SubscriptionType::FinalizedBlocks => self.api.blocks().subscribe_finalized().await,
+		}
+		.inspect_err(|err| {
+			log::error!(target: LOG_TARGET, "Failed to subscribe to blocks: {err:?}");
+		})?;
 
 		while let Some(block) = block_stream.next().await {
 			let block = match block {
@@ -324,7 +336,7 @@ impl Client {
 		let client = self.clone();
 		spawn_handle.spawn("subscribe-blocks", None, async move {
 			let res = client
-				.subscribe_new_blocks(|block| async {
+				.subscribe_new_blocks(SubscriptionType::BestBlocks, |block| async {
 					let receipts = extract_receipts_from_block(&block).await?;
 
 					client.receipt_provider.insert(&block.hash(), &receipts).await;
@@ -347,13 +359,14 @@ impl Client {
 		&self,
 		oldest_block: Option<SubstrateBlockNumber>,
 	) -> Result<(), ClientError> {
-		let new_blocks_fut = self.subscribe_new_blocks(|block| async move {
-			let receipts = extract_receipts_from_block(&block).await.inspect_err(|err| {
-				log::error!(target: LOG_TARGET, "Failed to extract receipts from block: {err:?}");
-			})?;
-			self.receipt_provider.insert(&block.hash(), &receipts).await;
-			Ok(())
-		});
+		let new_blocks_fut =
+			self.subscribe_new_blocks(SubscriptionType::FinalizedBlocks, |block| async move {
+				let receipts = extract_receipts_from_block(&block).await.inspect_err(|err| {
+					log::error!(target: LOG_TARGET, "Failed to extract receipts from block: {err:?}");
+				})?;
+				self.receipt_provider.insert(&block.hash(), &receipts).await;
+				Ok(())
+			});
 
 		let Some(oldest_block) = oldest_block else { return new_blocks_fut.await };
 
-- 
GitLab


From 12ed0f4ffe4dcf3a8fe8928e3791141a110fad8b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jos=C3=A9=20Molina=20Colmenero?= <jose@blockdeep.io>
Date: Tue, 21 Jan 2025 10:49:09 +0100
Subject: [PATCH 088/116] Add an extra_constant to pallet-collator-selection
 (#7206)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently `pallet-collator-selection` does not expose a way to query the
assigned pot account derived from the `PotId` configuration item.
Without it, it is not possible to transfer the existential deposit to
it.

This PR addresses this issue by exposing an extra constant.

---------

Co-authored-by: Bastian Köcher <git@kchr.de>
---
 cumulus/pallets/collator-selection/src/lib.rs | 13 +++++++++++++
 prdoc/pr_7206.prdoc                           | 13 +++++++++++++
 2 files changed, 26 insertions(+)
 create mode 100644 prdoc/pr_7206.prdoc

diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs
index 9d7e62af3c6..34c6ca8b36e 100644
--- a/cumulus/pallets/collator-selection/src/lib.rs
+++ b/cumulus/pallets/collator-selection/src/lib.rs
@@ -150,22 +150,27 @@ pub mod pallet {
 		type UpdateOrigin: EnsureOrigin<Self::RuntimeOrigin>;
 
 		/// Account Identifier from which the internal Pot is generated.
+		#[pallet::constant]
 		type PotId: Get<PalletId>;
 
 		/// Maximum number of candidates that we should have.
 		///
 		/// This does not take into account the invulnerables.
+		#[pallet::constant]
 		type MaxCandidates: Get<u32>;
 
 		/// Minimum number eligible collators. Should always be greater than zero. This includes
 		/// Invulnerable collators. This ensures that there will always be one collator who can
 		/// produce a block.
+		#[pallet::constant]
 		type MinEligibleCollators: Get<u32>;
 
 		/// Maximum number of invulnerables.
+		#[pallet::constant]
 		type MaxInvulnerables: Get<u32>;
 
 		// Will be kicked if block is not produced in threshold.
+		#[pallet::constant]
 		type KickThreshold: Get<BlockNumberFor<Self>>;
 
 		/// A stable ID for a validator.
@@ -183,6 +188,14 @@ pub mod pallet {
 		type WeightInfo: WeightInfo;
 	}
 
+	#[pallet::extra_constants]
+	impl<T: Config> Pallet<T> {
+		/// Gets this pallet's derived pot account.
+		fn pot_account() -> T::AccountId {
+			Self::account_id()
+		}
+	}
+
 	/// Basic information about a collation candidate.
 	#[derive(
 		PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, scale_info::TypeInfo, MaxEncodedLen,
diff --git a/prdoc/pr_7206.prdoc b/prdoc/pr_7206.prdoc
new file mode 100644
index 00000000000..d605308ba54
--- /dev/null
+++ b/prdoc/pr_7206.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Add an extra_constant to pallet-collator-selection"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      - Allows to query collator-selection's pot account via extra constant.
+
+crates:
+  - name: pallet-collator-selection
+    bump: minor
\ No newline at end of file
-- 
GitLab


From c0c0632c2efca435e973a1f6788e24235fe0e2a6 Mon Sep 17 00:00:00 2001
From: Clara van Staden <claravanstaden64@gmail.com>
Date: Tue, 21 Jan 2025 16:11:50 +0200
Subject: [PATCH 089/116] Snowbridge - Copy Rococo integration tests to Westend
 (#7108)

Copies all the integration tests from Rococo to Westend.

Closes: https://github.com/paritytech/polkadot-sdk/issues/6389
---
 .../bridges/bridge-hub-westend/src/lib.rs     |   3 +
 .../bridges/bridge-hub-westend/src/lib.rs     |   4 +-
 .../src/tests/snowbridge.rs                   | 647 +++++++++++++++++-
 3 files changed, 646 insertions(+), 8 deletions(-)

diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs
index b548e3b7e64..1b6f7965188 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs
@@ -18,6 +18,7 @@ pub mod genesis;
 pub use bridge_hub_westend_runtime::{
 	self, xcm_config::XcmConfig as BridgeHubWestendXcmConfig,
 	ExistentialDeposit as BridgeHubWestendExistentialDeposit,
+	RuntimeOrigin as BridgeHubWestendRuntimeOrigin,
 };
 
 // Substrate
@@ -47,6 +48,8 @@ decl_test_parachains! {
 			PolkadotXcm: bridge_hub_westend_runtime::PolkadotXcm,
 			Balances: bridge_hub_westend_runtime::Balances,
 			EthereumSystem: bridge_hub_westend_runtime::EthereumSystem,
+			EthereumInboundQueue: bridge_hub_westend_runtime::EthereumInboundQueue,
+			EthereumOutboundQueue: bridge_hub_westend_runtime::EthereumOutboundQueue,
 		}
 	},
 }
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
index 501ddb84d42..3d4d4f58e3b 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs
@@ -51,9 +51,11 @@ mod imports {
 		},
 		bridge_hub_westend_emulated_chain::{
 			genesis::ED as BRIDGE_HUB_WESTEND_ED, BridgeHubWestendExistentialDeposit,
-			BridgeHubWestendParaPallet as BridgeHubWestendPallet, BridgeHubWestendXcmConfig,
+			BridgeHubWestendParaPallet as BridgeHubWestendPallet, BridgeHubWestendRuntimeOrigin,
+			BridgeHubWestendXcmConfig,
 		},
 		penpal_emulated_chain::{
+			self,
 			penpal_runtime::xcm_config::{
 				CustomizableAssetFromSystemAssetHub as PenpalCustomizableAssetFromSystemAssetHub,
 				LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub,
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
index ffa60a4f52e..15ca3a5cf1b 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
@@ -12,15 +12,18 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-use crate::imports::*;
+use crate::{imports::*, tests::penpal_emulated_chain::penpal_runtime};
 use asset_hub_westend_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee;
-use bridge_hub_westend_runtime::EthereumInboundQueue;
+use bridge_hub_westend_runtime::{
+	bridge_to_ethereum_config::EthereumGatewayAddress, EthereumBeaconClient, EthereumInboundQueue,
+};
 use codec::{Decode, Encode};
-use emulated_integration_tests_common::RESERVABLE_ASSET_ID;
+use emulated_integration_tests_common::{PENPAL_B_ID, RESERVABLE_ASSET_ID};
 use frame_support::pallet_prelude::TypeInfo;
 use hex_literal::hex;
 use rococo_westend_system_emulated_network::asset_hub_westend_emulated_chain::genesis::AssetHubWestendAssetOwner;
-use snowbridge_core::{outbound::OperatingMode, AssetMetadata, TokenIdOf};
+use snowbridge_core::{inbound::InboundQueueFixture, AssetMetadata, TokenIdOf};
+use snowbridge_pallet_inbound_queue_fixtures::send_native_eth::make_send_native_eth_message;
 use snowbridge_router_primitives::inbound::{
 	Command, Destination, EthereumLocationsConverterFor, MessageV1, VersionedMessage,
 };
@@ -28,19 +31,20 @@ use sp_core::H256;
 use testnet_parachains_constants::westend::snowbridge::EthereumNetwork;
 use xcm_executor::traits::ConvertLocation;
 
-const INITIAL_FUND: u128 = 5_000_000_000_000;
 pub const CHAIN_ID: u64 = 11155111;
 pub const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d");
+const INITIAL_FUND: u128 = 5_000_000_000_000;
 const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e");
 const XCM_FEE: u128 = 100_000_000_000;
+const INSUFFICIENT_XCM_FEE: u128 = 1000;
 const TOKEN_AMOUNT: u128 = 100_000_000_000;
+const TREASURY_ACCOUNT: [u8; 32] =
+	hex!("6d6f646c70792f74727372790000000000000000000000000000000000000000");
 
 #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 pub enum ControlCall {
 	#[codec(index = 3)]
 	CreateAgent,
-	#[codec(index = 4)]
-	CreateChannel { mode: OperatingMode },
 }
 
 #[allow(clippy::large_enum_variant)]
@@ -50,6 +54,75 @@ pub enum SnowbridgeControl {
 	Control(ControlCall),
 }
 
+pub fn send_inbound_message(fixture: InboundQueueFixture) -> DispatchResult {
+	EthereumBeaconClient::store_finalized_header(
+		fixture.finalized_header,
+		fixture.block_roots_root,
+	)
+	.unwrap();
+	EthereumInboundQueue::submit(
+		BridgeHubWestendRuntimeOrigin::signed(BridgeHubWestendSender::get()),
+		fixture.message,
+	)
+}
+
+/// Create an agent on Ethereum. An agent is a representation of an entity in the Polkadot
+/// ecosystem (like a parachain) on Ethereum.
+#[test]
+#[ignore]
+fn create_agent() {
+	let origin_para: u32 = 1001;
+	// Fund the origin parachain sovereign account so that it can pay execution fees.
+	BridgeHubWestend::fund_para_sovereign(origin_para.into(), INITIAL_FUND);
+
+	let sudo_origin = <Westend as Chain>::RuntimeOrigin::root();
+	let destination = Westend::child_location_of(BridgeHubWestend::para_id()).into();
+
+	let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {});
+	// Construct XCM to create an agent for para 1001
+	let remote_xcm = VersionedXcm::from(Xcm(vec![
+		UnpaidExecution { weight_limit: Unlimited, check_origin: None },
+		DescendOrigin(Parachain(origin_para).into()),
+		Transact {
+			origin_kind: OriginKind::Xcm,
+			call: create_agent_call.encode().into(),
+			fallback_max_weight: None,
+		},
+	]));
+
+	// Westend Global Consensus
+	// Send XCM message from Relay Chain to Bridge Hub source Parachain
+	Westend::execute_with(|| {
+		assert_ok!(<Westend as WestendPallet>::XcmPallet::send(
+			sudo_origin,
+			bx!(destination),
+			bx!(remote_xcm),
+		));
+
+		type RuntimeEvent = <Westend as Chain>::RuntimeEvent;
+		// Check that the Transact message was sent
+		assert_expected_events!(
+			Westend,
+			vec![
+				RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {},
+			]
+		);
+	});
+
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+		// Check that a message was sent to Ethereum to create the agent
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![
+				RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::CreateAgent {
+					..
+				}) => {},
+			]
+		);
+	});
+}
+
 /// Tests the registering of a token as an asset on AssetHub.
 #[test]
 fn register_weth_token_from_ethereum_to_asset_hub() {
@@ -82,6 +155,566 @@ fn register_weth_token_from_ethereum_to_asset_hub() {
 	});
 }
 
+/// Tests the registering of a token as an asset on AssetHub, and then subsequently sending
+/// a token from Ethereum to AssetHub.
+#[test]
+fn send_weth_token_from_ethereum_to_asset_hub() {
+	let ethereum_network: NetworkId = EthereumNetwork::get().into();
+	let origin_location = Location::new(2, ethereum_network);
+	let ethereum_sovereign: AccountId =
+		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+
+	BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id().into(), INITIAL_FUND);
+
+	// Fund ethereum sovereign on AssetHub
+	AssetHubWestend::fund_accounts(vec![
+		(AssetHubWestendReceiver::get(), INITIAL_FUND),
+		(ethereum_sovereign, INITIAL_FUND),
+	]);
+
+	// Register the token
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+
+		let message = VersionedMessage::V1(MessageV1 {
+			chain_id: CHAIN_ID,
+			command: Command::RegisterToken { token: WETH.into(), fee: XCM_FEE },
+		});
+		let (xcm, _) = EthereumInboundQueue::do_convert([0; 32].into(), message).unwrap();
+		let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap();
+
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},]
+		);
+	});
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Created { .. }) => {},]
+		);
+	});
+
+	// Send the token
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+
+		type EthereumInboundQueue =
+			<BridgeHubWestend as BridgeHubWestendPallet>::EthereumInboundQueue;
+		let message_id: H256 = [0; 32].into();
+		let message = VersionedMessage::V1(MessageV1 {
+			chain_id: CHAIN_ID,
+			command: Command::SendToken {
+				token: WETH.into(),
+				destination: Destination::AccountId32 { id: AssetHubWestendSender::get().into() },
+				amount: 1_000_000,
+				fee: XCM_FEE,
+			},
+		});
+		let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap();
+		assert_ok!(EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()));
+
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},]
+		);
+	});
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		// Check that the token was received and issued as a foreign asset on AssetHub
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+			]
+		);
+	});
+}
+
+/// Tests sending a token to a 3rd party parachain, called PenPal. The token reserve is
+/// still located on AssetHub.
+#[test]
+fn send_weth_from_ethereum_to_penpal() {
+	let asset_hub_sovereign = BridgeHubWestend::sovereign_account_id_of(Location::new(
+		1,
+		[Parachain(AssetHubWestend::para_id().into())],
+	));
+	// Fund AssetHub sovereign account so it can pay execution fees for the asset transfer
+	BridgeHubWestend::fund_accounts(vec![(asset_hub_sovereign.clone(), INITIAL_FUND)]);
+
+	// Fund PenPal receiver (covering ED)
+	let native_id: Location = Parent.into();
+	let receiver: AccountId = [
+		28, 189, 45, 67, 83, 10, 68, 112, 90, 208, 136, 175, 49, 62, 24, 248, 11, 83, 239, 22, 179,
+		97, 119, 205, 75, 119, 184, 70, 242, 165, 240, 124,
+	]
+	.into();
+	PenpalB::mint_foreign_asset(
+		<PenpalB as Chain>::RuntimeOrigin::signed(PenpalAssetOwner::get()),
+		native_id,
+		receiver,
+		penpal_runtime::EXISTENTIAL_DEPOSIT,
+	);
+
+	PenpalB::execute_with(|| {
+		assert_ok!(<PenpalB as Chain>::System::set_storage(
+			<PenpalB as Chain>::RuntimeOrigin::root(),
+			vec![(
+				PenpalCustomizableAssetFromSystemAssetHub::key().to_vec(),
+				Location::new(2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })]).encode(),
+			)],
+		));
+	});
+
+	let ethereum_network_v5: NetworkId = EthereumNetwork::get().into();
+
+	// The Weth asset location, identified by the contract address on Ethereum
+	let weth_asset_location: Location =
+		(Parent, Parent, ethereum_network_v5, AccountKey20 { network: None, key: WETH }).into();
+
+	let origin_location = (Parent, Parent, ethereum_network_v5).into();
+
+	// Fund ethereum sovereign on AssetHub
+	let ethereum_sovereign: AccountId =
+		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+	AssetHubWestend::fund_accounts(vec![(ethereum_sovereign.clone(), INITIAL_FUND)]);
+
+	// Create asset on the Penpal parachain.
+	PenpalB::execute_with(|| {
+		assert_ok!(<PenpalB as PenpalBPallet>::ForeignAssets::force_create(
+			<PenpalB as Chain>::RuntimeOrigin::root(),
+			weth_asset_location.clone(),
+			asset_hub_sovereign.into(),
+			false,
+			1000,
+		));
+
+		assert!(<PenpalB as PenpalBPallet>::ForeignAssets::asset_exists(weth_asset_location));
+	});
+
+	// Register the token
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+
+		let message = VersionedMessage::V1(MessageV1 {
+			chain_id: CHAIN_ID,
+			command: Command::RegisterToken { token: WETH.into(), fee: XCM_FEE },
+		});
+		let (xcm, _) = EthereumInboundQueue::do_convert([0; 32].into(), message).unwrap();
+		let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap();
+
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},]
+		);
+	});
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![RuntimeEvent::ForeignAssets(pallet_assets::Event::Created { .. }) => {},]
+		);
+	});
+
+	// Send the token
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+
+		type EthereumInboundQueue =
+			<BridgeHubWestend as BridgeHubWestendPallet>::EthereumInboundQueue;
+		let message_id: H256 = [0; 32].into();
+		let message = VersionedMessage::V1(MessageV1 {
+			chain_id: CHAIN_ID,
+			command: Command::SendToken {
+				token: WETH.into(),
+				destination: Destination::ForeignAccountId32 {
+					para_id: PENPAL_B_ID,
+					id: PenpalBReceiver::get().into(),
+					fee: XCM_FEE,
+				},
+				amount: 1_000_000,
+				fee: XCM_FEE,
+			},
+		});
+		let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap();
+		assert_ok!(EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()));
+
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},]
+		);
+	});
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+		// Check that the assets were issued on AssetHub
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+				RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},
+			]
+		);
+	});
+
+	PenpalB::execute_with(|| {
+		type RuntimeEvent = <PenpalB as Chain>::RuntimeEvent;
+		// Check that the assets were issued on PenPal
+		assert_expected_events!(
+			PenpalB,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+			]
+		);
+	});
+}
+
+/// Tests the full cycle of eth transfers:
+/// - sending a token to AssetHub
+/// - returning the token to Ethereum
+#[test]
+fn send_eth_asset_from_asset_hub_to_ethereum_and_back() {
+	let ethereum_network: NetworkId = EthereumNetwork::get().into();
+	let origin_location = (Parent, Parent, ethereum_network).into();
+
+	use asset_hub_westend_runtime::xcm_config::bridging::to_ethereum::DefaultBridgeHubEthereumBaseFee;
+	let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id());
+	let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location);
+	let ethereum_sovereign: AccountId =
+		EthereumLocationsConverterFor::<AccountId>::convert_location(&origin_location).unwrap();
+
+	AssetHubWestend::force_default_xcm_version(Some(XCM_VERSION));
+	BridgeHubWestend::force_default_xcm_version(Some(XCM_VERSION));
+	AssetHubWestend::force_xcm_version(origin_location.clone(), XCM_VERSION);
+
+	BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]);
+	AssetHubWestend::fund_accounts(vec![
+		(AssetHubWestendReceiver::get(), INITIAL_FUND),
+		(ethereum_sovereign.clone(), INITIAL_FUND),
+	]);
+
+	// Register ETH
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <AssetHubWestend as Chain>::RuntimeOrigin;
+		assert_ok!(<AssetHubWestend as AssetHubWestendPallet>::ForeignAssets::force_create(
+			RuntimeOrigin::root(),
+			origin_location.clone(),
+			ethereum_sovereign.into(),
+			true,
+			1000,
+		));
+
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::ForceCreated { .. }) => {},
+			]
+		);
+	});
+	const ETH_AMOUNT: u128 = 1_000_000_000_000_000_000;
+
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <BridgeHubWestend as Chain>::RuntimeOrigin;
+
+		// Set the gateway. This is needed because new fixtures use a different gateway address.
+		assert_ok!(<BridgeHubWestend as Chain>::System::set_storage(
+			RuntimeOrigin::root(),
+			vec![(
+				EthereumGatewayAddress::key().to_vec(),
+				sp_core::H160(hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d")).encode(),
+			)],
+		));
+
+		// Construct SendToken message and sent to inbound queue
+		assert_ok!(send_inbound_message(make_send_native_eth_message()));
+
+		// Check that the send token message was sent using xcm
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![
+				RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},
+			]
+		);
+	});
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+		type RuntimeOrigin = <AssetHubWestend as Chain>::RuntimeOrigin;
+
+		let _issued_event = RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued {
+			asset_id: origin_location.clone(),
+			owner: AssetHubWestendReceiver::get().into(),
+			amount: ETH_AMOUNT,
+		});
+		// Check that AssetHub has issued the foreign asset
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				_issued_event => {},
+			]
+		);
+		let assets =
+			vec![Asset { id: AssetId(origin_location.clone()), fun: Fungible(ETH_AMOUNT) }];
+		let multi_assets = VersionedAssets::from(Assets::from(assets));
+
+		let destination = origin_location.clone().into();
+
+		let beneficiary = VersionedLocation::from(Location::new(
+			0,
+			[AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }],
+		));
+
+		let free_balance_before =
+			<AssetHubWestend as AssetHubWestendPallet>::Balances::free_balance(
+				AssetHubWestendReceiver::get(),
+			);
+		// Send the ETH back to Ethereum
+		<AssetHubWestend as AssetHubWestendPallet>::PolkadotXcm::limited_reserve_transfer_assets(
+			RuntimeOrigin::signed(AssetHubWestendReceiver::get()),
+			Box::new(destination),
+			Box::new(beneficiary),
+			Box::new(multi_assets),
+			0,
+			Unlimited,
+		)
+		.unwrap();
+
+		let _burned_event = RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned {
+			asset_id: origin_location.clone(),
+			owner: AssetHubWestendReceiver::get().into(),
+			balance: ETH_AMOUNT,
+		});
+		// Check that AssetHub has burned the foreign asset
+		let _destination = origin_location.clone();
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				_burned_event => {},
+				RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent {
+					destination: _destination, ..
+				}) => {},
+			]
+		);
+
+		let free_balance_after = <AssetHubWestend as AssetHubWestendPallet>::Balances::free_balance(
+			AssetHubWestendReceiver::get(),
+		);
+		// Assert at least DefaultBridgeHubEthereumBaseFee charged from the sender
+		let free_balance_diff = free_balance_before - free_balance_after;
+		assert!(free_balance_diff > DefaultBridgeHubEthereumBaseFee::get());
+	});
+
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+		// Check that the transfer-token-back-to-Ethereum message was queued in the
+		// Ethereum Outbound Queue
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![
+				RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageAccepted {..}) => {},
+				RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued {..}) => {},
+			]
+		);
+
+		let events = BridgeHubWestend::events();
+		// Check that the local fee was credited to the Snowbridge sovereign account
+		assert!(
+			events.iter().any(|event| matches!(
+				event,
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount: _ })
+					if *who == TREASURY_ACCOUNT.into()
+			)),
+			"Snowbridge sovereign takes local fee."
+		);
+		// Check that the remote fee was credited to the AssetHub sovereign account
+		assert!(
+			events.iter().any(|event| matches!(
+				event,
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount: _ })
+					if *who == assethub_sovereign
+			)),
+			"AssetHub sovereign takes remote fee."
+		);
+	});
+}
+
+#[test]
+fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() {
+	BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id().into(), INITIAL_FUND);
+
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+		type EthereumInboundQueue =
+			<BridgeHubWestend as BridgeHubWestendPallet>::EthereumInboundQueue;
+		let message_id: H256 = [0; 32].into();
+		let message = VersionedMessage::V1(MessageV1 {
+			chain_id: CHAIN_ID,
+			command: Command::RegisterToken {
+				token: WETH.into(),
+				// Insufficient fee which should trigger the trap
+				fee: INSUFFICIENT_XCM_FEE,
+			},
+		});
+		let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap();
+		let _ = EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()).unwrap();
+
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![
+				RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},
+			]
+		);
+	});
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {},
+			]
+		);
+	});
+}
+
+fn send_weth_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) {
+	let ethereum_network_v5: NetworkId = EthereumNetwork::get().into();
+	let weth_asset_location: Location =
+		Location::new(2, [ethereum_network_v5.into(), AccountKey20 { network: None, key: WETH }]);
+	// Fund asset hub sovereign on bridge hub
+	let asset_hub_sovereign = BridgeHubWestend::sovereign_account_id_of(Location::new(
+		1,
+		[Parachain(AssetHubWestend::para_id().into())],
+	));
+	BridgeHubWestend::fund_accounts(vec![(asset_hub_sovereign.clone(), INITIAL_FUND)]);
+
+	// Register WETH
+	AssetHubWestend::execute_with(|| {
+		type RuntimeOrigin = <AssetHubWestend as Chain>::RuntimeOrigin;
+
+		assert_ok!(<AssetHubWestend as AssetHubWestendPallet>::ForeignAssets::force_create(
+			RuntimeOrigin::root(),
+			weth_asset_location.clone().try_into().unwrap(),
+			asset_hub_sovereign.into(),
+			false,
+			1,
+		));
+
+		assert!(<AssetHubWestend as AssetHubWestendPallet>::ForeignAssets::asset_exists(
+			weth_asset_location.clone().try_into().unwrap(),
+		));
+	});
+
+	// Send WETH to an existent account on asset hub
+	BridgeHubWestend::execute_with(|| {
+		type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+
+		type EthereumInboundQueue =
+			<BridgeHubWestend as BridgeHubWestendPallet>::EthereumInboundQueue;
+		let message_id: H256 = [0; 32].into();
+		let message = VersionedMessage::V1(MessageV1 {
+			chain_id: CHAIN_ID,
+			command: Command::SendToken {
+				token: WETH.into(),
+				destination: Destination::AccountId32 { id: account_id },
+				amount: 1_000_000,
+				fee,
+			},
+		});
+		let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap();
+		assert_ok!(EthereumInboundQueue::send_xcm(xcm, AssetHubWestend::para_id().into()));
+
+		// Check that the message was sent
+		assert_expected_events!(
+			BridgeHubWestend,
+			vec![
+				RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn send_weth_from_ethereum_to_existent_account_on_asset_hub() {
+	send_weth_from_ethereum_to_asset_hub_with_fee(AssetHubWestendSender::get().into(), XCM_FEE);
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		// Check that the token was received and issued as a foreign asset on AssetHub
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub() {
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE);
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		// Check that the token was received and issued as a foreign asset on AssetHub
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() {
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE);
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		// Check that the message was not processed successfully due to insufficient fee
+
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn send_weth_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed(
+) {
+	// On AH the xcm fee is 26_789_690 and the ED is 3_300_000
+	send_weth_from_ethereum_to_asset_hub_with_fee([1; 32], 30_000_000);
+
+	AssetHubWestend::execute_with(|| {
+		type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+		// Check that the message was not processed successfully due to insufficient ED
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {},
+			]
+		);
+	});
+}
+
 /// Tests the registering of a token as an asset on AssetHub, and then subsequently sending
 /// a token from Ethereum to AssetHub.
 #[test]
-- 
GitLab


From ebde96caf5bf24a626d7de247724a599f106284f Mon Sep 17 00:00:00 2001
From: Sebastian Kunert <skunert49@gmail.com>
Date: Tue, 21 Jan 2025 17:33:47 +0100
Subject: [PATCH 090/116] Fix link-checker job (#7261)

Link-checker job is constantly failing because of these two links.

In the browser there is a redirect, apparently our lychee checker can't
handle it.
---
 polkadot/node/gum/proc-macro/src/lib.rs   | 2 +-
 substrate/frame/contracts/src/schedule.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/polkadot/node/gum/proc-macro/src/lib.rs b/polkadot/node/gum/proc-macro/src/lib.rs
index e8b6b599172..96ff4417a5a 100644
--- a/polkadot/node/gum/proc-macro/src/lib.rs
+++ b/polkadot/node/gum/proc-macro/src/lib.rs
@@ -90,7 +90,7 @@ pub(crate) fn gum(item: proc_macro::TokenStream, level: Level) -> proc_macro::To
 		.add_comment("Generated overseer code by `gum::warn!(..)`".to_owned())
 		// `dry=true` until rust-analyzer can selectively disable features so it's
 		// not all red squiggles. Originally: `!cfg!(feature = "expand")`
-		// ISSUE: <https://github.com/rust-analyzer/rust-analyzer/issues/11777>
+		// ISSUE: https://github.com/rust-lang/rust-analyzer/issues/11777
 		.dry(true)
 		.verbose(false)
 		.fmt(expander::Edition::_2021)
diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs
index 80b8c54b1e1..285184280fc 100644
--- a/substrate/frame/contracts/src/schedule.rs
+++ b/substrate/frame/contracts/src/schedule.rs
@@ -114,7 +114,7 @@ impl Limits {
 #[scale_info(skip_type_params(T))]
 pub struct InstructionWeights<T: Config> {
 	/// Base instruction `ref_time` Weight.
-	/// Should match to wasmi's `1` fuel (see <https://github.com/paritytech/wasmi/issues/701>).
+	/// Should match to wasmi's `1` fuel (see <https://github.com/wasmi-labs/wasmi/issues/701>).
 	pub base: u32,
 	/// The type parameter is used in the default implementation.
 	#[codec(skip)]
-- 
GitLab


From 9edaef09a69e39b0785f8339f93a3ed6a1f6e023 Mon Sep 17 00:00:00 2001
From: Ludovic_Domingues <ludovic.domingues96@gmail.com>
Date: Tue, 21 Jan 2025 18:36:04 +0100
Subject: [PATCH 091/116] Migrate pallet-paged-list-fuzzer to umbrella crate
 (#6930)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Part of #6504

---------

Co-authored-by: Bastian Köcher <git@kchr.de>
Co-authored-by: Giuseppe Re <giuseppe.re@parity.io>
---
 Cargo.lock                                          |  3 +--
 substrate/frame/paged-list/fuzzer/Cargo.toml        | 11 ++++++++---
 substrate/frame/paged-list/fuzzer/src/paged_list.rs |  7 +++++--
 3 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 397d0c7fe82..55cc1721bdd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14608,10 +14608,9 @@ name = "pallet-paged-list-fuzzer"
 version = "0.1.0"
 dependencies = [
  "arbitrary",
- "frame-support 28.0.0",
  "honggfuzz",
  "pallet-paged-list 0.6.0",
- "sp-io 30.0.0",
+ "polkadot-sdk-frame 0.1.0",
 ]
 
 [[package]]
diff --git a/substrate/frame/paged-list/fuzzer/Cargo.toml b/substrate/frame/paged-list/fuzzer/Cargo.toml
index 7e6162df09b..32535093b59 100644
--- a/substrate/frame/paged-list/fuzzer/Cargo.toml
+++ b/substrate/frame/paged-list/fuzzer/Cargo.toml
@@ -18,8 +18,13 @@ path = "src/paged_list.rs"
 
 [dependencies]
 arbitrary = { workspace = true }
+frame = { workspace = true, features = ["runtime"] }
 honggfuzz = { workspace = true }
-
-frame-support = { features = ["std"], workspace = true }
 pallet-paged-list = { features = ["std"], workspace = true }
-sp-io = { features = ["std"], workspace = true }
+
+[features]
+default = ["std"]
+std = [
+	"frame/std",
+	"pallet-paged-list/std",
+]
diff --git a/substrate/frame/paged-list/fuzzer/src/paged_list.rs b/substrate/frame/paged-list/fuzzer/src/paged_list.rs
index 43b797eee6b..f0f914de142 100644
--- a/substrate/frame/paged-list/fuzzer/src/paged_list.rs
+++ b/substrate/frame/paged-list/fuzzer/src/paged_list.rs
@@ -30,9 +30,12 @@
 use arbitrary::Arbitrary;
 use honggfuzz::fuzz;
 
-use frame_support::{storage::StorageList, StorageNoopGuard};
+use frame::{
+	prelude::*, runtime::prelude::storage::storage_noop_guard::StorageNoopGuard,
+	testing_prelude::TestExternalities,
+};
+
 use pallet_paged_list::mock::{PagedList as List, *};
-use sp_io::TestExternalities;
 type Meta = MetaOf<Test, ()>;
 
 fn main() {
-- 
GitLab


From 2345eb9a5b5e2145ac1c04fd9cf1fcf12b7278b6 Mon Sep 17 00:00:00 2001
From: Javier Viola <363911+pepoviola@users.noreply.github.com>
Date: Tue, 21 Jan 2025 18:24:05 -0300
Subject: [PATCH 092/116] Bump zombienet version to `v1.3.119` (#7283)

This version includes a fix that makes the test
`zombienet-polkadot-malus-0001-dispute-valid` green again.
Thx!
---
 .gitlab/pipeline/zombienet.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml
index 08bfed2e24c..c48bca8af48 100644
--- a/.gitlab/pipeline/zombienet.yml
+++ b/.gitlab/pipeline/zombienet.yml
@@ -1,7 +1,7 @@
 .zombienet-refs:
   extends: .build-refs
   variables:
-    ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.116"
+    ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.119"
     PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics"
     DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs"
     ZOMBIE_PROVIDER: "k8s"
-- 
GitLab


From 1bdb817f2b140b0c2573396146fd7bbfb936af10 Mon Sep 17 00:00:00 2001
From: Serban Iorga <serban@parity.io>
Date: Wed, 22 Jan 2025 12:01:28 +0200
Subject: [PATCH 093/116] Enable BEEFY `report_fork_voting()` (#6856)

Related to https://github.com/paritytech/polkadot-sdk/issues/4523

Follow-up for: https://github.com/paritytech/polkadot-sdk/pull/5188

Reopening https://github.com/paritytech/polkadot-sdk/pull/6732 as a new
PR

---------

Co-authored-by: command-bot <>
---
 Cargo.lock                                    | 14 +++-
 .../rococo/src/weights/pallet_beefy_mmr.rs    | 37 ++++++---
 .../westend/src/weights/pallet_beefy_mmr.rs   | 37 ++++++---
 prdoc/pr_6856.prdoc                           | 28 +++++++
 substrate/frame/beefy-mmr/src/benchmarking.rs | 18 +++++
 substrate/frame/beefy-mmr/src/lib.rs          | 16 ++++
 substrate/frame/beefy-mmr/src/weights.rs      | 81 +++++++++++++------
 substrate/frame/beefy/src/equivocation.rs     |  8 +-
 substrate/frame/beefy/src/lib.rs              | 10 +--
 substrate/frame/beefy/src/mock.rs             |  9 +++
 substrate/frame/beefy/src/tests.rs            | 56 ++++++++++++-
 .../frame/merkle-mountain-range/src/lib.rs    |  6 ++
 .../merkle-mountain-range/src/mmr/mmr.rs      | 18 ++++-
 .../merkle-mountain-range/src/mmr/mod.rs      |  2 +-
 .../frame/merkle-mountain-range/src/tests.rs  |  1 +
 .../primitives/consensus/beefy/src/lib.rs     |  6 ++
 .../merkle-mountain-range/Cargo.toml          |  2 +-
 17 files changed, 278 insertions(+), 71 deletions(-)
 create mode 100644 prdoc/pr_6856.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 55cc1721bdd..7e41b7e9937 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17263,6 +17263,16 @@ dependencies = [
  "itertools 0.10.5",
 ]
 
+[[package]]
+name = "polkadot-ckb-merkle-mountain-range"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "221c71b432b38e494a0fdedb5f720e4cb974edf03a0af09e5b2238dbac7e6947"
+dependencies = [
+ "cfg-if",
+ "itertools 0.10.5",
+]
+
 [[package]]
 name = "polkadot-cli"
 version = "7.0.0"
@@ -26958,7 +26968,7 @@ dependencies = [
  "array-bytes",
  "log",
  "parity-scale-codec",
- "polkadot-ckb-merkle-mountain-range",
+ "polkadot-ckb-merkle-mountain-range 0.8.1",
  "scale-info",
  "serde",
  "sp-api 26.0.0",
@@ -26976,7 +26986,7 @@ checksum = "9a12dd76e368f1e48144a84b4735218b712f84b3f976970e2f25a29b30440e10"
 dependencies = [
  "log",
  "parity-scale-codec",
- "polkadot-ckb-merkle-mountain-range",
+ "polkadot-ckb-merkle-mountain-range 0.7.0",
  "scale-info",
  "serde",
  "sp-api 34.0.0",
diff --git a/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs b/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs
index 317c9149ec6..54989c4f549 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_beefy_mmr`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -48,14 +48,25 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_beefy_mmr`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
+	/// The range of component `n` is `[2, 512]`.
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 622_000 picoseconds.
+		Weight::from_parts(1_166_954, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 65
+			.saturating_add(Weight::from_parts(1_356, 0).saturating_mul(n.into()))
+	}
 	/// Storage: `System::BlockHash` (r:1 w:0)
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn extract_validation_context() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
+		//  Measured:  `68`
 		//  Estimated: `3509`
-		// Minimum execution time: 7_116_000 picoseconds.
-		Weight::from_parts(7_343_000, 0)
+		// Minimum execution time: 6_272_000 picoseconds.
+		Weight::from_parts(6_452_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -63,10 +74,10 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
 	/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	fn read_peak() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `234`
+		//  Measured:  `254`
 		//  Estimated: `3505`
-		// Minimum execution time: 5_652_000 picoseconds.
-		Weight::from_parts(5_963_000, 0)
+		// Minimum execution time: 6_576_000 picoseconds.
+		Weight::from_parts(6_760_000, 0)
 			.saturating_add(Weight::from_parts(0, 3505))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -77,13 +88,13 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
 	/// The range of component `n` is `[2, 512]`.
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `226`
+		//  Measured:  `246`
 		//  Estimated: `1517`
-		// Minimum execution time: 11_953_000 picoseconds.
-		Weight::from_parts(15_978_891, 0)
+		// Minimum execution time: 12_538_000 picoseconds.
+		Weight::from_parts(24_516_023, 0)
 			.saturating_add(Weight::from_parts(0, 1517))
-			// Standard Error: 1_780
-			.saturating_add(Weight::from_parts(1_480_582, 0).saturating_mul(n.into()))
+			// Standard Error: 1_923
+			.saturating_add(Weight::from_parts(1_426_781, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 	}
 }
diff --git a/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs b/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs
index 5be207e3fcf..8de9f6ab53e 100644
--- a/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_beefy_mmr`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-12-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -48,14 +48,25 @@ use core::marker::PhantomData;
 /// Weight functions for `pallet_beefy_mmr`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
+	/// The range of component `n` is `[2, 512]`.
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 628_000 picoseconds.
+		Weight::from_parts(1_200_102, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 63
+			.saturating_add(Weight::from_parts(1_110, 0).saturating_mul(n.into()))
+	}
 	/// Storage: `System::BlockHash` (r:1 w:0)
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn extract_validation_context() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `92`
+		//  Measured:  `68`
 		//  Estimated: `3509`
-		// Minimum execution time: 7_850_000 picoseconds.
-		Weight::from_parts(8_169_000, 0)
+		// Minimum execution time: 9_862_000 picoseconds.
+		Weight::from_parts(10_329_000, 0)
 			.saturating_add(Weight::from_parts(0, 3509))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -63,10 +74,10 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
 	/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	fn read_peak() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `201`
+		//  Measured:  `221`
 		//  Estimated: `3505`
-		// Minimum execution time: 6_852_000 picoseconds.
-		Weight::from_parts(7_448_000, 0)
+		// Minimum execution time: 6_396_000 picoseconds.
+		Weight::from_parts(6_691_000, 0)
 			.saturating_add(Weight::from_parts(0, 3505))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
@@ -77,13 +88,13 @@ impl<T: frame_system::Config> pallet_beefy_mmr::WeightInfo for WeightInfo<T> {
 	/// The range of component `n` is `[2, 512]`.
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `193`
+		//  Measured:  `213`
 		//  Estimated: `1517`
-		// Minimum execution time: 12_860_000 picoseconds.
-		Weight::from_parts(17_158_162, 0)
+		// Minimum execution time: 12_553_000 picoseconds.
+		Weight::from_parts(24_003_920, 0)
 			.saturating_add(Weight::from_parts(0, 1517))
-			// Standard Error: 1_732
-			.saturating_add(Weight::from_parts(1_489_410, 0).saturating_mul(n.into()))
+			// Standard Error: 2_023
+			.saturating_add(Weight::from_parts(1_390_986, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 	}
 }
diff --git a/prdoc/pr_6856.prdoc b/prdoc/pr_6856.prdoc
new file mode 100644
index 00000000000..480c3acea19
--- /dev/null
+++ b/prdoc/pr_6856.prdoc
@@ -0,0 +1,28 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Enable report_fork_voting()
+
+doc:
+  - audience:
+    - Runtime Dev
+    - Runtime User
+    description: |
+      This PR enables calling `report_fork_voting`.
+      In order to do this we needed to also check that the ancestry proof is optimal.
+
+crates:
+  - name: pallet-mmr
+    bump: minor
+  - name: sp-mmr-primitives
+    bump: minor
+  - name: sp-consensus-beefy
+    bump: minor
+  - name: rococo-runtime
+    bump: minor
+  - name: pallet-beefy
+    bump: minor
+  - name: pallet-beefy-mmr
+    bump: minor
+  - name: westend-runtime
+    bump: minor
diff --git a/substrate/frame/beefy-mmr/src/benchmarking.rs b/substrate/frame/beefy-mmr/src/benchmarking.rs
index fea6a2078f0..4fddd1bccf1 100644
--- a/substrate/frame/beefy-mmr/src/benchmarking.rs
+++ b/substrate/frame/beefy-mmr/src/benchmarking.rs
@@ -49,6 +49,24 @@ fn init_block<T: Config>(block_num: u32) {
 mod benchmarks {
 	use super::*;
 
+	/// Generate ancestry proofs with `n` leafs and benchmark the logic that checks
+	/// if the proof is optimal.
+	#[benchmark]
+	fn n_leafs_proof_is_optimal(n: Linear<2, 512>) {
+		pallet_mmr::UseLocalStorage::<T>::set(true);
+
+		for block_num in 1..=n {
+			init_block::<T>(block_num);
+		}
+		let proof = Mmr::<T>::generate_mock_ancestry_proof().unwrap();
+		assert_eq!(proof.leaf_count, n as u64);
+
+		#[block]
+		{
+			<BeefyMmr<T> as AncestryHelper<HeaderFor<T>>>::is_proof_optimal(&proof);
+		};
+	}
+
 	#[benchmark]
 	fn extract_validation_context() {
 		pallet_mmr::UseLocalStorage::<T>::set(true);
diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs
index ef99bc1e9cf..c7fcdeff879 100644
--- a/substrate/frame/beefy-mmr/src/lib.rs
+++ b/substrate/frame/beefy-mmr/src/lib.rs
@@ -210,6 +210,18 @@ where
 			.ok()
 	}
 
+	fn is_proof_optimal(proof: &Self::Proof) -> bool {
+		let is_proof_optimal = pallet_mmr::Pallet::<T>::is_ancestry_proof_optimal(proof);
+
+		// We don't check the proof size when running benchmarks, since we use mock proofs
+		// which would cause the test to fail.
+		if cfg!(feature = "runtime-benchmarks") {
+			return true
+		}
+
+		is_proof_optimal
+	}
+
 	fn extract_validation_context(header: HeaderFor<T>) -> Option<Self::ValidationContext> {
 		// Check if the provided header is canonical.
 		let expected_hash = frame_system::Pallet::<T>::block_hash(header.number());
@@ -292,6 +304,10 @@ impl<T: Config> AncestryHelperWeightInfo<HeaderFor<T>> for Pallet<T>
 where
 	T: pallet_mmr::Config<Hashing = sp_consensus_beefy::MmrHashing>,
 {
+	fn is_proof_optimal(proof: &<Self as AncestryHelper<HeaderFor<T>>>::Proof) -> Weight {
+		<T as Config>::WeightInfo::n_leafs_proof_is_optimal(proof.leaf_count.saturated_into())
+	}
+
 	fn extract_validation_context() -> Weight {
 		<T as Config>::WeightInfo::extract_validation_context()
 	}
diff --git a/substrate/frame/beefy-mmr/src/weights.rs b/substrate/frame/beefy-mmr/src/weights.rs
index dcfdb560ee9..5f7f7055311 100644
--- a/substrate/frame/beefy-mmr/src/weights.rs
+++ b/substrate/frame/beefy-mmr/src/weights.rs
@@ -51,6 +51,7 @@ use core::marker::PhantomData;
 
 /// Weight functions needed for `pallet_beefy_mmr`.
 pub trait WeightInfo {
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight;
 	fn extract_validation_context() -> Weight;
 	fn read_peak() -> Weight;
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight;
@@ -59,25 +60,38 @@ pub trait WeightInfo {
 /// Weights for `pallet_beefy_mmr` using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	/// The range of component `n` is `[2, 512]`.
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 622_000 picoseconds.
+		Weight::from_parts(1_166_954, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 65
+			.saturating_add(Weight::from_parts(1_356, 0).saturating_mul(n.into()))
+	}
 	/// Storage: `System::BlockHash` (r:1 w:0)
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn extract_validation_context() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `68`
 		//  Estimated: `3509`
-		// Minimum execution time: 6_687_000 picoseconds.
-		Weight::from_parts(6_939_000, 3509)
-			.saturating_add(T::DbWeight::get().reads(1_u64))
+		// Minimum execution time: 6_272_000 picoseconds.
+		Weight::from_parts(6_452_000, 0)
+			.saturating_add(Weight::from_parts(0, 3509))
+			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	/// Storage: `Mmr::Nodes` (r:1 w:0)
 	/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	fn read_peak() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `386`
+		//  Measured:  `254`
 		//  Estimated: `3505`
-		// Minimum execution time: 10_409_000 picoseconds.
-		Weight::from_parts(10_795_000, 3505)
-			.saturating_add(T::DbWeight::get().reads(1_u64))
+		// Minimum execution time: 6_576_000 picoseconds.
+		Weight::from_parts(6_760_000, 0)
+			.saturating_add(Weight::from_parts(0, 3505))
+			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	/// Storage: `Mmr::RootHash` (r:1 w:0)
 	/// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
@@ -86,37 +100,51 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `n` is `[2, 512]`.
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `378`
+		//  Measured:  `246`
 		//  Estimated: `1517`
-		// Minimum execution time: 15_459_000 picoseconds.
-		Weight::from_parts(21_963_366, 1517)
-			// Standard Error: 1_528
-			.saturating_add(Weight::from_parts(984_907, 0).saturating_mul(n.into()))
-			.saturating_add(T::DbWeight::get().reads(2_u64))
+		// Minimum execution time: 12_538_000 picoseconds.
+		Weight::from_parts(24_516_023, 0)
+			.saturating_add(Weight::from_parts(0, 1517))
+			// Standard Error: 1_923
+			.saturating_add(Weight::from_parts(1_426_781, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(2))
 	}
 }
 
 // For backwards compatibility and tests.
 impl WeightInfo for () {
+	/// The range of component `n` is `[2, 512]`.
+	fn n_leafs_proof_is_optimal(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 622_000 picoseconds.
+		Weight::from_parts(1_166_954, 0)
+			.saturating_add(Weight::from_parts(0, 0))
+			// Standard Error: 65
+			.saturating_add(Weight::from_parts(1_356, 0).saturating_mul(n.into()))
+	}
 	/// Storage: `System::BlockHash` (r:1 w:0)
 	/// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`)
 	fn extract_validation_context() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `68`
 		//  Estimated: `3509`
-		// Minimum execution time: 6_687_000 picoseconds.
-		Weight::from_parts(6_939_000, 3509)
-			.saturating_add(RocksDbWeight::get().reads(1_u64))
+		// Minimum execution time: 6_272_000 picoseconds.
+		Weight::from_parts(6_452_000, 0)
+			.saturating_add(Weight::from_parts(0, 3509))
+			.saturating_add(RocksDbWeight::get().reads(1))
 	}
 	/// Storage: `Mmr::Nodes` (r:1 w:0)
 	/// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
 	fn read_peak() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `386`
+		//  Measured:  `254`
 		//  Estimated: `3505`
-		// Minimum execution time: 10_409_000 picoseconds.
-		Weight::from_parts(10_795_000, 3505)
-			.saturating_add(RocksDbWeight::get().reads(1_u64))
+		// Minimum execution time: 6_576_000 picoseconds.
+		Weight::from_parts(6_760_000, 0)
+			.saturating_add(Weight::from_parts(0, 3505))
+			.saturating_add(RocksDbWeight::get().reads(1))
 	}
 	/// Storage: `Mmr::RootHash` (r:1 w:0)
 	/// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`)
@@ -125,12 +153,13 @@ impl WeightInfo for () {
 	/// The range of component `n` is `[2, 512]`.
 	fn n_items_proof_is_non_canonical(n: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `378`
+		//  Measured:  `246`
 		//  Estimated: `1517`
-		// Minimum execution time: 15_459_000 picoseconds.
-		Weight::from_parts(21_963_366, 1517)
-			// Standard Error: 1_528
-			.saturating_add(Weight::from_parts(984_907, 0).saturating_mul(n.into()))
-			.saturating_add(RocksDbWeight::get().reads(2_u64))
+		// Minimum execution time: 12_538_000 picoseconds.
+		Weight::from_parts(24_516_023, 0)
+			.saturating_add(Weight::from_parts(0, 1517))
+			// Standard Error: 1_923
+			.saturating_add(Weight::from_parts(1_426_781, 0).saturating_mul(n.into()))
+			.saturating_add(RocksDbWeight::get().reads(2))
 	}
 }
diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs
index 3a49b9e169c..294d64427ef 100644
--- a/substrate/frame/beefy/src/equivocation.rs
+++ b/substrate/frame/beefy/src/equivocation.rs
@@ -207,11 +207,17 @@ impl<T: Config> EquivocationEvidenceFor<T> {
 					return Err(Error::<T>::InvalidDoubleVotingProof);
 				}
 
-				return Ok(())
+				Ok(())
 			},
 			EquivocationEvidenceFor::ForkVotingProof(equivocation_proof, _) => {
 				let ForkVotingProof { vote, ancestry_proof, header } = equivocation_proof;
 
+				if !<T::AncestryHelper as AncestryHelper<HeaderFor<T>>>::is_proof_optimal(
+					&ancestry_proof,
+				) {
+					return Err(Error::<T>::InvalidForkVotingProof);
+				}
+
 				let maybe_validation_context = <T::AncestryHelper as AncestryHelper<
 					HeaderFor<T>,
 				>>::extract_validation_context(header);
diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs
index cf690a9df33..e57fc0e21bc 100644
--- a/substrate/frame/beefy/src/lib.rs
+++ b/substrate/frame/beefy/src/lib.rs
@@ -755,7 +755,8 @@ pub(crate) trait WeightInfoExt: WeightInfo {
 		max_nominators_per_validator: u32,
 		ancestry_proof: &<T::AncestryHelper as AncestryHelper<HeaderFor<T>>>::Proof,
 	) -> Weight {
-		let _weight = <T::AncestryHelper as AncestryHelperWeightInfo<HeaderFor<T>>>::extract_validation_context()
+		<T::AncestryHelper as AncestryHelperWeightInfo<HeaderFor<T>>>::is_proof_optimal(&ancestry_proof)
+			.saturating_add(<T::AncestryHelper as AncestryHelperWeightInfo<HeaderFor<T>>>::extract_validation_context())
 			.saturating_add(
 				<T::AncestryHelper as AncestryHelperWeightInfo<HeaderFor<T>>>::is_non_canonical(
 					ancestry_proof,
@@ -765,12 +766,7 @@ pub(crate) trait WeightInfoExt: WeightInfo {
 				1,
 				validator_count,
 				max_nominators_per_validator,
-			));
-
-		// TODO: https://github.com/paritytech/polkadot-sdk/issues/4523 - return `_weight` here.
-		// We return `Weight::MAX` currently in order to disallow this extrinsic for the moment.
-		// We need to check that the proof is optimal.
-		Weight::MAX
+			))
 	}
 
 	fn report_future_block_voting(
diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs
index 38e0cc4cfc2..4b5f1d103b5 100644
--- a/substrate/frame/beefy/src/mock.rs
+++ b/substrate/frame/beefy/src/mock.rs
@@ -99,6 +99,7 @@ pub struct MockAncestryProofContext {
 
 #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
 pub struct MockAncestryProof {
+	pub is_optimal: bool,
 	pub is_non_canonical: bool,
 }
 
@@ -128,6 +129,10 @@ impl<Header: HeaderT> AncestryHelper<Header> for MockAncestryHelper {
 		unimplemented!()
 	}
 
+	fn is_proof_optimal(proof: &Self::Proof) -> bool {
+		proof.is_optimal
+	}
+
 	fn extract_validation_context(_header: Header) -> Option<Self::ValidationContext> {
 		AncestryProofContext::get()
 	}
@@ -142,6 +147,10 @@ impl<Header: HeaderT> AncestryHelper<Header> for MockAncestryHelper {
 }
 
 impl<Header: HeaderT> AncestryHelperWeightInfo<Header> for MockAncestryHelper {
+	fn is_proof_optimal(_proof: &<Self as AncestryHelper<HeaderFor<Test>>>::Proof) -> Weight {
+		unimplemented!()
+	}
+
 	fn extract_validation_context() -> Weight {
 		unimplemented!()
 	}
diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs
index 89645d21f6b..1bd0a72b25e 100644
--- a/substrate/frame/beefy/src/tests.rs
+++ b/substrate/frame/beefy/src/tests.rs
@@ -799,7 +799,7 @@ fn report_fork_voting(
 	let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]);
 	let equivocation_proof = generate_fork_voting_proof(
 		(block_num, payload, set_id, &equivocation_keyring),
-		MockAncestryProof { is_non_canonical: true },
+		MockAncestryProof { is_optimal: true, is_non_canonical: true },
 		System::finalize(),
 	);
 
@@ -835,6 +835,54 @@ fn report_fork_voting_invalid_key_owner_proof() {
 	report_equivocation_invalid_key_owner_proof(report_fork_voting);
 }
 
+#[test]
+fn report_fork_voting_non_optimal_equivocation_proof() {
+	let authorities = test_authorities();
+
+	let mut ext = ExtBuilder::default().add_authorities(authorities).build();
+
+	let mut era = 1;
+	let (block_num, set_id, equivocation_keyring, key_owner_proof) = ext.execute_with(|| {
+		start_era(era);
+		let block_num = System::block_number();
+
+		let validator_set = Beefy::validator_set().unwrap();
+		let authorities = validator_set.validators();
+		let set_id = validator_set.id();
+
+		let equivocation_authority_index = 0;
+		let equivocation_key = &authorities[equivocation_authority_index];
+		let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap();
+
+		// generate a key ownership proof at set id in era 1
+		let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap();
+
+		era += 1;
+		start_era(era);
+		(block_num, set_id, equivocation_keyring, key_owner_proof)
+	});
+	ext.persist_offchain_overlay();
+
+	ext.execute_with(|| {
+		let payload = Payload::from_single_entry(MMR_ROOT_ID, vec![42]);
+
+		// Simulate non optimal equivocation proof.
+		let equivocation_proof = generate_fork_voting_proof(
+			(block_num + 1, payload.clone(), set_id, &equivocation_keyring),
+			MockAncestryProof { is_optimal: false, is_non_canonical: true },
+			System::finalize(),
+		);
+		assert_err!(
+			Beefy::report_fork_voting_unsigned(
+				RuntimeOrigin::none(),
+				Box::new(equivocation_proof),
+				key_owner_proof.clone(),
+			),
+			Error::<Test>::InvalidForkVotingProof,
+		);
+	});
+}
+
 #[test]
 fn report_fork_voting_invalid_equivocation_proof() {
 	let authorities = test_authorities();
@@ -869,7 +917,7 @@ fn report_fork_voting_invalid_equivocation_proof() {
 		// vote signed with a key that isn't part of the authority set
 		let equivocation_proof = generate_fork_voting_proof(
 			(block_num, payload.clone(), set_id, &BeefyKeyring::Dave),
-			MockAncestryProof { is_non_canonical: true },
+			MockAncestryProof { is_optimal: true, is_non_canonical: true },
 			System::finalize(),
 		);
 		assert_err!(
@@ -884,7 +932,7 @@ fn report_fork_voting_invalid_equivocation_proof() {
 		// Simulate InvalidForkVotingProof error.
 		let equivocation_proof = generate_fork_voting_proof(
 			(block_num + 1, payload.clone(), set_id, &equivocation_keyring),
-			MockAncestryProof { is_non_canonical: false },
+			MockAncestryProof { is_optimal: true, is_non_canonical: false },
 			System::finalize(),
 		);
 		assert_err!(
@@ -945,7 +993,7 @@ fn report_fork_voting_invalid_context() {
 		// different payload than finalized
 		let equivocation_proof = generate_fork_voting_proof(
 			(block_num, payload, set_id, &equivocation_keyring),
-			MockAncestryProof { is_non_canonical: true },
+			MockAncestryProof { is_optimal: true, is_non_canonical: true },
 			System::finalize(),
 		);
 
diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs
index 76d6c2a1ac7..cc64dfcb7de 100644
--- a/substrate/frame/merkle-mountain-range/src/lib.rs
+++ b/substrate/frame/merkle-mountain-range/src/lib.rs
@@ -445,6 +445,12 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		mmr.generate_mock_ancestry_proof()
 	}
 
+	pub fn is_ancestry_proof_optimal(
+		ancestry_proof: &primitives::AncestryProof<HashOf<T, I>>,
+	) -> bool {
+		mmr::is_ancestry_proof_optimal::<HashingOf<T, I>>(ancestry_proof)
+	}
+
 	pub fn verify_ancestry_proof(
 		root: HashOf<T, I>,
 		ancestry_proof: AncestryProof<HashOf<T, I>>,
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
index a9818ba4710..69a08a8b2d6 100644
--- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
+++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs
@@ -63,6 +63,18 @@ where
 		.map_err(|e| Error::Verify.log_debug(e))
 }
 
+pub fn is_ancestry_proof_optimal<H>(ancestry_proof: &AncestryProof<H::Output>) -> bool
+where
+	H: frame::traits::Hash,
+{
+	let prev_mmr_size = NodesUtils::new(ancestry_proof.prev_leaf_count).size();
+	let mmr_size = NodesUtils::new(ancestry_proof.leaf_count).size();
+
+	let expected_proof_size =
+		mmr_lib::ancestry_proof::expected_ancestry_proof_size(prev_mmr_size, mmr_size);
+	ancestry_proof.items.len() == expected_proof_size
+}
+
 pub fn verify_ancestry_proof<H, L>(
 	root: H::Output,
 	ancestry_proof: AncestryProof<H::Output>,
@@ -83,9 +95,9 @@ where
 	);
 
 	let raw_ancestry_proof = mmr_lib::AncestryProof::<Node<H, L>, Hasher<H, L>> {
+		prev_mmr_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1),
 		prev_peaks: ancestry_proof.prev_peaks.into_iter().map(|hash| Node::Hash(hash)).collect(),
-		prev_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1),
-		proof: prev_peaks_proof,
+		prev_peaks_proof,
 	};
 
 	let prev_root = mmr_lib::ancestry_proof::bagging_peaks_hashes::<Node<H, L>, Hasher<H, L>>(
@@ -248,7 +260,7 @@ where
 			prev_leaf_count,
 			leaf_count: self.leaves,
 			items: raw_ancestry_proof
-				.proof
+				.prev_peaks_proof
 				.proof_items()
 				.iter()
 				.map(|(index, item)| (*index, item.hash()))
diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
index 85d00f8a65d..d3232f23bce 100644
--- a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
+++ b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs
@@ -18,7 +18,7 @@
 mod mmr;
 pub mod storage;
 
-pub use self::mmr::{verify_ancestry_proof, verify_leaves_proof, Mmr};
+pub use self::mmr::{is_ancestry_proof_optimal, verify_ancestry_proof, verify_leaves_proof, Mmr};
 use crate::primitives::{mmr_lib, DataOrHash, FullLeaf};
 use frame::traits;
 
diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs
index ae0c58e91ab..03b08e51c32 100644
--- a/substrate/frame/merkle-mountain-range/src/tests.rs
+++ b/substrate/frame/merkle-mountain-range/src/tests.rs
@@ -811,6 +811,7 @@ fn generating_and_verifying_ancestry_proofs_works_correctly() {
 		for prev_block_number in 1usize..=500 {
 			let proof =
 				Pallet::<Test>::generate_ancestry_proof(prev_block_number as u64, None).unwrap();
+			assert!(Pallet::<Test>::is_ancestry_proof_optimal(&proof));
 			assert_eq!(
 				Pallet::<Test>::verify_ancestry_proof(root, proof),
 				Ok(prev_roots[prev_block_number - 1])
diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs
index e977fb0ea25..0f57cdfc810 100644
--- a/substrate/primitives/consensus/beefy/src/lib.rs
+++ b/substrate/primitives/consensus/beefy/src/lib.rs
@@ -449,6 +449,9 @@ pub trait AncestryHelper<Header: HeaderT> {
 		best_known_block_number: Option<Header::Number>,
 	) -> Option<Self::Proof>;
 
+	/// Check if the proof is optimal.
+	fn is_proof_optimal(proof: &Self::Proof) -> bool;
+
 	/// Extract the validation context from the provided header.
 	fn extract_validation_context(header: Header) -> Option<Self::ValidationContext>;
 
@@ -463,6 +466,9 @@ pub trait AncestryHelper<Header: HeaderT> {
 
 /// Weight information for the logic in `AncestryHelper`.
 pub trait AncestryHelperWeightInfo<Header: HeaderT>: AncestryHelper<Header> {
+	/// Weight info for the `AncestryHelper::is_proof_optimal()` method.
+	fn is_proof_optimal(proof: &<Self as AncestryHelper<Header>>::Proof) -> Weight;
+
 	/// Weight info for the `AncestryHelper::extract_validation_context()` method.
 	fn extract_validation_context() -> Weight;
 
diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml
index 5f861ca7acf..0d8a67da7ca 100644
--- a/substrate/primitives/merkle-mountain-range/Cargo.toml
+++ b/substrate/primitives/merkle-mountain-range/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { workspace = true }
 log = { workspace = true }
-mmr-lib = { package = "polkadot-ckb-merkle-mountain-range", version = "0.7.0", default-features = false }
+mmr-lib = { package = "polkadot-ckb-merkle-mountain-range", version = "0.8.1", default-features = false }
 scale-info = { features = ["derive"], workspace = true }
 serde = { features = ["alloc", "derive"], optional = true, workspace = true }
 sp-api = { workspace = true }
-- 
GitLab


From 4eb9228840be0abef1c45cf8fa8bc44b5f95200a Mon Sep 17 00:00:00 2001
From: Stephane Gurgenidze <59443568+sw10pa@users.noreply.github.com>
Date: Wed, 22 Jan 2025 15:00:50 +0400
Subject: [PATCH 094/116] collation-generation: resolve mismatch between
 descriptor and commitments core index (#7104)

## Issue
[[#7107] Core Index Mismatch in Commitments and
Descriptor](https://github.com/paritytech/polkadot-sdk/issues/7107)

## Description
This PR resolves a bug where normal (non-malus) undying collators failed
to generate and submit collations, resulting in the following error:

`ERROR tokio-runtime-worker parachain::collation-generation: Failed to
construct and distribute collation: V2 core index check failed: The core
index in commitments doesn't match the one in descriptor.`

More details about the issue and reproduction steps are described in the
[related issue](https://github.com/paritytech/polkadot-sdk/issues/7107).

## Summary of Fix
- When core selectors are provided in the UMP signals, core indexes will
be chosen using them;
- The fix ensures that functionality remains unchanged for parachains
not using UMP signals;
- Added checks to stop processing if the same core is selected
repeatedly.

## TODO
- [X] Implement the fix;
- [x] Add tests;
- [x] Add PRdoc.
---
 polkadot/node/collation-generation/src/lib.rs |  88 ++++++-
 .../node/collation-generation/src/tests.rs    | 216 ++++++++++++++++--
 prdoc/pr_7104.prdoc                           |  23 ++
 3 files changed, 299 insertions(+), 28 deletions(-)
 create mode 100644 prdoc/pr_7104.prdoc

diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index b371017a828..3c8a216f5f3 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -53,7 +53,7 @@ use polkadot_primitives::{
 	node_features::FeatureIndex,
 	vstaging::{
 		transpose_claim_queue, CandidateDescriptorV2, CandidateReceiptV2 as CandidateReceipt,
-		CommittedCandidateReceiptV2, TransposedClaimQueue,
+		ClaimQueueOffset, CommittedCandidateReceiptV2, TransposedClaimQueue,
 	},
 	CandidateCommitments, CandidateDescriptor, CollatorPair, CoreIndex, Hash, Id as ParaId,
 	NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, SessionIndex,
@@ -61,7 +61,7 @@ use polkadot_primitives::{
 };
 use schnellru::{ByLength, LruMap};
 use sp_core::crypto::Pair;
-use std::sync::Arc;
+use std::{collections::HashSet, sync::Arc};
 
 mod error;
 
@@ -276,13 +276,15 @@ impl CollationGenerationSubsystem {
 		let claim_queue =
 			ClaimQueueSnapshot::from(request_claim_queue(relay_parent, ctx.sender()).await.await??);
 
-		let cores_to_build_on = claim_queue
-			.iter_claims_at_depth(0)
-			.filter_map(|(core_idx, para_id)| (para_id == config.para_id).then_some(core_idx))
+		let assigned_cores = claim_queue
+			.iter_all_claims()
+			.filter_map(|(core_idx, para_ids)| {
+				para_ids.iter().any(|&para_id| para_id == config.para_id).then_some(*core_idx)
+			})
 			.collect::<Vec<_>>();
 
-		// Nothing to do if no core assigned to us.
-		if cores_to_build_on.is_empty() {
+		// Nothing to do if no core is assigned to us at any depth.
+		if assigned_cores.is_empty() {
 			return Ok(())
 		}
 
@@ -342,9 +344,13 @@ impl CollationGenerationSubsystem {
 		ctx.spawn(
 			"chained-collation-builder",
 			Box::pin(async move {
-				let transposed_claim_queue = transpose_claim_queue(claim_queue.0);
+				let transposed_claim_queue = transpose_claim_queue(claim_queue.0.clone());
 
-				for core_index in cores_to_build_on {
+				// Track used core indexes to avoid submitting collations on the same core.
+				let mut used_cores = HashSet::new();
+
+				for i in 0..assigned_cores.len() {
+					// Get the collation.
 					let collator_fn = match task_config.collator.as_ref() {
 						Some(x) => x,
 						None => return,
@@ -363,6 +369,68 @@ impl CollationGenerationSubsystem {
 							},
 						};
 
+					// Use the core_selector method from CandidateCommitments to extract
+					// CoreSelector and ClaimQueueOffset.
+					let mut commitments = CandidateCommitments::default();
+					commitments.upward_messages = collation.upward_messages.clone();
+
+					let (cs_index, cq_offset) = match commitments.core_selector() {
+						// Use the CoreSelector's index if provided.
+						Ok(Some((sel, off))) => (sel.0 as usize, off),
+						// Fallback to the sequential index if no CoreSelector is provided.
+						Ok(None) => (i, ClaimQueueOffset(0)),
+						Err(err) => {
+							gum::debug!(
+								target: LOG_TARGET,
+								?para_id,
+								"error processing UMP signals: {}",
+								err
+							);
+							return
+						},
+					};
+
+					// Identify the cores to build collations on using the given claim queue offset.
+					let cores_to_build_on = claim_queue
+						.iter_claims_at_depth(cq_offset.0 as usize)
+						.filter_map(|(core_idx, para_id)| {
+							(para_id == task_config.para_id).then_some(core_idx)
+						})
+						.collect::<Vec<_>>();
+
+					if cores_to_build_on.is_empty() {
+						gum::debug!(
+							target: LOG_TARGET,
+							?para_id,
+							"no core is assigned to para at depth {}",
+							cq_offset.0,
+						);
+						return
+					}
+
+					let descriptor_core_index =
+						cores_to_build_on[cs_index % cores_to_build_on.len()];
+
+					// Ensure the core index has not been used before.
+					if used_cores.contains(&descriptor_core_index.0) {
+						gum::warn!(
+							target: LOG_TARGET,
+							?para_id,
+							"parachain repeatedly selected the same core index: {}",
+							descriptor_core_index.0,
+						);
+						return
+					}
+
+					used_cores.insert(descriptor_core_index.0);
+					gum::trace!(
+						target: LOG_TARGET,
+						?para_id,
+						"selected core index: {}",
+						descriptor_core_index.0,
+					);
+
+					// Distribute the collation.
 					let parent_head = collation.head_data.clone();
 					if let Err(err) = construct_and_distribute_receipt(
 						PreparedCollation {
@@ -372,7 +440,7 @@ impl CollationGenerationSubsystem {
 							validation_data: validation_data.clone(),
 							validation_code_hash,
 							n_validators,
-							core_index,
+							core_index: descriptor_core_index,
 							session_index,
 						},
 						task_config.key.clone(),
diff --git a/polkadot/node/collation-generation/src/tests.rs b/polkadot/node/collation-generation/src/tests.rs
index f81c14cdf8f..dc1d7b3489c 100644
--- a/polkadot/node/collation-generation/src/tests.rs
+++ b/polkadot/node/collation-generation/src/tests.rs
@@ -16,11 +16,10 @@
 
 use super::*;
 use assert_matches::assert_matches;
-use futures::{
-	task::{Context as FuturesContext, Poll},
-	Future, StreamExt,
+use futures::{self, Future, StreamExt};
+use polkadot_node_primitives::{
+	BlockData, Collation, CollationResult, CollatorFn, MaybeCompressedPoV, PoV,
 };
-use polkadot_node_primitives::{BlockData, Collation, CollationResult, MaybeCompressedPoV, PoV};
 use polkadot_node_subsystem::{
 	messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest},
 	ActivatedLeaf,
@@ -28,14 +27,16 @@ use polkadot_node_subsystem::{
 use polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle;
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::{
-	node_features, vstaging::CandidateDescriptorVersion, CollatorPair, PersistedValidationData,
+	node_features,
+	vstaging::{CandidateDescriptorVersion, CoreSelector, UMPSignal, UMP_SEPARATOR},
+	CollatorPair, PersistedValidationData,
 };
 use polkadot_primitives_test_helpers::dummy_head_data;
 use rstest::rstest;
 use sp_keyring::sr25519::Keyring as Sr25519Keyring;
 use std::{
 	collections::{BTreeMap, VecDeque},
-	pin::Pin,
+	sync::Mutex,
 };
 
 type VirtualOverseer = TestSubsystemContextHandle<CollationGenerationMessage>;
@@ -79,17 +80,64 @@ fn test_collation() -> Collation {
 	}
 }
 
-struct TestCollator;
+struct CoreSelectorData {
+	// The core selector index.
+	index: u8,
+	// The increment value for the core selector index. Normally 1, but can be set to 0 or another
+	// value for testing scenarios where a parachain repeatedly selects the same core index.
+	increment_index_by: u8,
+	// The claim queue offset.
+	cq_offset: u8,
+}
+
+impl CoreSelectorData {
+	fn new(index: u8, increment_index_by: u8, cq_offset: u8) -> Self {
+		Self { index, increment_index_by, cq_offset }
+	}
+}
 
-impl Future for TestCollator {
-	type Output = Option<CollationResult>;
+struct State {
+	core_selector_data: Option<CoreSelectorData>,
+}
 
-	fn poll(self: Pin<&mut Self>, _cx: &mut FuturesContext) -> Poll<Self::Output> {
-		Poll::Ready(Some(CollationResult { collation: test_collation(), result_sender: None }))
+impl State {
+	fn new(core_selector_data: Option<CoreSelectorData>) -> Self {
+		Self { core_selector_data }
 	}
 }
 
-impl Unpin for TestCollator {}
+struct TestCollator {
+	state: Arc<Mutex<State>>,
+}
+
+impl TestCollator {
+	fn new(core_selector_data: Option<CoreSelectorData>) -> Self {
+		Self { state: Arc::new(Mutex::new(State::new(core_selector_data))) }
+	}
+
+	pub fn create_collation_function(&self) -> CollatorFn {
+		let state = Arc::clone(&self.state);
+
+		Box::new(move |_relay_parent: Hash, _validation_data: &PersistedValidationData| {
+			let mut collation = test_collation();
+			let mut state_guard = state.lock().unwrap();
+
+			if let Some(core_selector_data) = &mut state_guard.core_selector_data {
+				collation.upward_messages.force_push(UMP_SEPARATOR);
+				collation.upward_messages.force_push(
+					UMPSignal::SelectCore(
+						CoreSelector(core_selector_data.index),
+						ClaimQueueOffset(core_selector_data.cq_offset),
+					)
+					.encode(),
+				);
+				core_selector_data.index += core_selector_data.increment_index_by;
+			}
+
+			async move { Some(CollationResult { collation, result_sender: None }) }.boxed()
+		})
+	}
+}
 
 const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000);
 
@@ -101,10 +149,14 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
 		.expect(&format!("{:?} is long enough to receive messages", TIMEOUT))
 }
 
-fn test_config<Id: Into<ParaId>>(para_id: Id) -> CollationGenerationConfig {
+fn test_config<Id: Into<ParaId>>(
+	para_id: Id,
+	core_selector_data: Option<CoreSelectorData>,
+) -> CollationGenerationConfig {
+	let test_collator = TestCollator::new(core_selector_data);
 	CollationGenerationConfig {
 		key: CollatorPair::generate().0,
-		collator: Some(Box::new(|_: Hash, _vd: &PersistedValidationData| TestCollator.boxed())),
+		collator: Some(test_collator.create_collation_function()),
 		para_id: para_id.into(),
 	}
 }
@@ -219,7 +271,7 @@ fn distribute_collation_only_for_assigned_para_id_at_offset_0() {
 		.collect::<BTreeMap<_, _>>();
 
 	test_harness(|mut virtual_overseer| async move {
-		helpers::initialize_collator(&mut virtual_overseer, para_id).await;
+		helpers::initialize_collator(&mut virtual_overseer, para_id, None).await;
 		helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
 		helpers::handle_runtime_calls_on_new_head_activation(
 			&mut virtual_overseer,
@@ -259,7 +311,7 @@ fn distribute_collation_with_elastic_scaling(#[case] total_cores: u32) {
 		.collect::<BTreeMap<_, _>>();
 
 	test_harness(|mut virtual_overseer| async move {
-		helpers::initialize_collator(&mut virtual_overseer, para_id).await;
+		helpers::initialize_collator(&mut virtual_overseer, para_id, None).await;
 		helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
 		helpers::handle_runtime_calls_on_new_head_activation(
 			&mut virtual_overseer,
@@ -281,6 +333,127 @@ fn distribute_collation_with_elastic_scaling(#[case] total_cores: u32) {
 	});
 }
 
+// Tests when submission core indexes need to be selected using the core selectors provided in the
+// UMP signals. The core selector index is an increasing number that can start with a non-zero
+// value (even greater than the core index), but the collation generation protocol uses the
+// remainder to select the core. UMP signals may also contain a claim queue offset, based on which
+// we need to select the assigned core indexes for the para from that offset in the claim queue.
+#[rstest]
+#[case(0, 0, 0, false)]
+#[case(1, 0, 0, true)]
+#[case(1, 5, 0, false)]
+#[case(2, 0, 1, true)]
+#[case(4, 2, 2, false)]
+fn distribute_collation_with_core_selectors(
+	#[case] total_cores: u32,
+	// The core selector index that will be obtained from the first collation.
+	#[case] init_cs_index: u8,
+	// Claim queue offset where the assigned cores will be stored.
+	#[case] cq_offset: u8,
+	// Enables v2 receipts feature, affecting core selector and claim queue handling.
+	#[case] v2_receipts: bool,
+) {
+	let activated_hash: Hash = [1; 32].into();
+	let para_id = ParaId::from(5);
+	let other_para_id = ParaId::from(10);
+	let node_features =
+		if v2_receipts { node_features_with_v2_enabled() } else { NodeFeatures::EMPTY };
+
+	let claim_queue = (0..total_cores)
+		.into_iter()
+		.map(|idx| {
+			// Set all cores assigned to para_id 5 at the cq_offset depth.
+			let mut vec = VecDeque::from(vec![other_para_id; cq_offset as usize]);
+			vec.push_back(para_id);
+			(CoreIndex(idx), vec)
+		})
+		.collect::<BTreeMap<_, _>>();
+
+	test_harness(|mut virtual_overseer| async move {
+		helpers::initialize_collator(
+			&mut virtual_overseer,
+			para_id,
+			Some(CoreSelectorData::new(init_cs_index, 1, cq_offset)),
+		)
+		.await;
+		helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
+		helpers::handle_runtime_calls_on_new_head_activation(
+			&mut virtual_overseer,
+			activated_hash,
+			claim_queue,
+			node_features,
+		)
+		.await;
+
+		let mut cores_assigned = (0..total_cores).collect::<Vec<_>>();
+		if total_cores > 1 && init_cs_index > 0 {
+			// We need to rotate the list of cores because the first core selector index was
+			// non-zero, which should change the sequence of submissions. However, collations should
+			// still be submitted on all cores.
+			cores_assigned.rotate_left((init_cs_index as u32 % total_cores) as usize);
+		}
+		helpers::handle_cores_processing_for_a_leaf(
+			&mut virtual_overseer,
+			activated_hash,
+			para_id,
+			cores_assigned,
+		)
+		.await;
+
+		virtual_overseer
+	});
+}
+
+// Tests the behavior when a parachain repeatedly selects the same core index.
+// Ensures that the system handles this behavior correctly while maintaining expected functionality.
+#[rstest]
+#[case(3, 0, vec![0])]
+#[case(3, 1, vec![0, 1, 2])]
+#[case(3, 2, vec![0, 2, 1])]
+#[case(3, 3, vec![0])]
+#[case(3, 4, vec![0, 1, 2])]
+fn distribute_collation_with_repeated_core_selector_index(
+	#[case] total_cores: u32,
+	#[case] increment_cs_index_by: u8,
+	#[case] expected_selected_cores: Vec<u32>,
+) {
+	let activated_hash: Hash = [1; 32].into();
+	let para_id = ParaId::from(5);
+	let node_features = node_features_with_v2_enabled();
+
+	let claim_queue = (0..total_cores)
+		.into_iter()
+		.map(|idx| (CoreIndex(idx), VecDeque::from([para_id])))
+		.collect::<BTreeMap<_, _>>();
+
+	test_harness(|mut virtual_overseer| async move {
+		helpers::initialize_collator(
+			&mut virtual_overseer,
+			para_id,
+			Some(CoreSelectorData::new(0, increment_cs_index_by, 0)),
+		)
+		.await;
+		helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
+		helpers::handle_runtime_calls_on_new_head_activation(
+			&mut virtual_overseer,
+			activated_hash,
+			claim_queue,
+			node_features,
+		)
+		.await;
+
+		helpers::handle_cores_processing_for_a_leaf(
+			&mut virtual_overseer,
+			activated_hash,
+			para_id,
+			expected_selected_cores,
+		)
+		.await;
+
+		virtual_overseer
+	});
+}
+
 #[rstest]
 #[case(true)]
 #[case(false)]
@@ -405,10 +578,17 @@ mod helpers {
 	use std::collections::{BTreeMap, VecDeque};
 
 	// Sends `Initialize` with a collator config
-	pub async fn initialize_collator(virtual_overseer: &mut VirtualOverseer, para_id: ParaId) {
+	pub async fn initialize_collator(
+		virtual_overseer: &mut VirtualOverseer,
+		para_id: ParaId,
+		core_selector_data: Option<CoreSelectorData>,
+	) {
 		virtual_overseer
 			.send(FromOrchestra::Communication {
-				msg: CollationGenerationMessage::Initialize(test_config(para_id)),
+				msg: CollationGenerationMessage::Initialize(test_config(
+					para_id,
+					core_selector_data,
+				)),
 			})
 			.await;
 	}
diff --git a/prdoc/pr_7104.prdoc b/prdoc/pr_7104.prdoc
new file mode 100644
index 00000000000..bd05e2b60e1
--- /dev/null
+++ b/prdoc/pr_7104.prdoc
@@ -0,0 +1,23 @@
+title: "collation-generation: resolve mismatch between descriptor and commitments core index"
+
+doc:
+  - audience: Node Dev
+    description: |
+      This PR resolves a bug where collators failed to generate and submit collations,
+      resulting in the following error:
+
+      ```
+      ERROR tokio-runtime-worker parachain::collation-generation: Failed to construct and
+      distribute collation: V2 core index check failed: The core index in commitments doesn't
+      match the one in descriptor.
+      ```
+
+      This issue affects only legacy and test collators that still use the collation function.
+      It is not a problem for lookahead or slot-based collators.
+
+      This fix ensures the descriptor core index contains the value determined by the core
+      selector UMP signal when the parachain is using RFC103.
+
+crates:
+  - name: polkadot-node-collation-generation
+    bump: patch
-- 
GitLab


From 350a6c4ccc4c2f376b9f5ed259daf3a56d5fed56 Mon Sep 17 00:00:00 2001
From: Serban Iorga <serban@parity.io>
Date: Wed, 22 Jan 2025 14:02:01 +0200
Subject: [PATCH 095/116] Fix bridge tests image (#7292)

Fix bridge tests image
---
 docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
index b1f4bffc772..f9879fea208 100644
--- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
+++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
@@ -48,8 +48,9 @@ RUN set -eux; \
 	cd /home/nonroot/bridges-polkadot-sdk/bridges/testing/framework/utils/generate_hex_encoded_call; \
 	npm install
 
+# use the non-root user
+USER node
 # check if executable works in this container
-USER nonroot
 RUN /usr/local/bin/polkadot --version
 RUN /usr/local/bin/polkadot-parachain --version
 RUN /usr/local/bin/substrate-relay --version
-- 
GitLab


From 634a17b6f67c71e589f921b0ddd4c23bbed883f1 Mon Sep 17 00:00:00 2001
From: Mrisho Lukamba <69342343+MrishoLukamba@users.noreply.github.com>
Date: Wed, 22 Jan 2025 18:06:18 +0300
Subject: [PATCH 096/116] Unify Import verifier usage across parachain template
 and omninode (#7195)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Closes #7055

@skunert @bkchr

---------

Co-authored-by: Bastian Köcher <git@kchr.de>
Co-authored-by: command-bot <>
Co-authored-by: Sebastian Kunert <skunert49@gmail.com>
---
 .../aura/src/equivocation_import_queue.rs     | 31 +++++++++-
 .../polkadot-omni-node/lib/src/nodes/aura.rs  | 56 +++++++------------
 prdoc/pr_7195.prdoc                           |  7 +++
 3 files changed, 56 insertions(+), 38 deletions(-)
 create mode 100644 prdoc/pr_7195.prdoc

diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
index dbd9d5ba6a6..a3bc90f53c2 100644
--- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
+++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs
@@ -68,7 +68,8 @@ impl NaiveEquivocationDefender {
 	}
 }
 
-struct Verifier<P, Client, Block, CIDP> {
+/// A parachain block import verifier that checks for equivocation limits within each slot.
+pub struct Verifier<P, Client, Block, CIDP> {
 	client: Arc<Client>,
 	create_inherent_data_providers: CIDP,
 	defender: Mutex<NaiveEquivocationDefender>,
@@ -76,6 +77,34 @@ struct Verifier<P, Client, Block, CIDP> {
 	_phantom: std::marker::PhantomData<fn() -> (Block, P)>,
 }
 
+impl<P, Client, Block, CIDP> Verifier<P, Client, Block, CIDP>
+where
+	P: Pair,
+	P::Signature: Codec,
+	P::Public: Codec + Debug,
+	Block: BlockT,
+	Client: ProvideRuntimeApi<Block> + Send + Sync,
+	<Client as ProvideRuntimeApi<Block>>::Api: BlockBuilderApi<Block> + AuraApi<Block, P::Public>,
+
+	CIDP: CreateInherentDataProviders<Block, ()>,
+{
+	/// Creates a new Verifier instance for handling parachain block import verification in Aura
+	/// consensus.
+	pub fn new(
+		client: Arc<Client>,
+		inherent_data_provider: CIDP,
+		telemetry: Option<TelemetryHandle>,
+	) -> Self {
+		Self {
+			client,
+			create_inherent_data_providers: inherent_data_provider,
+			defender: Mutex::new(NaiveEquivocationDefender::default()),
+			telemetry,
+			_phantom: std::marker::PhantomData,
+		}
+	}
+}
+
 #[async_trait::async_trait]
 impl<P, Client, Block, CIDP> VerifierT<Block> for Verifier<P, Client, Block, CIDP>
 where
diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
index 816f76117a2..cd0e35d0d06 100644
--- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
+++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
@@ -37,9 +37,12 @@ use cumulus_client_collator::service::{
 use cumulus_client_consensus_aura::collators::slot_based::{
 	self as slot_based, Params as SlotBasedParams,
 };
-use cumulus_client_consensus_aura::collators::{
-	lookahead::{self as aura, Params as AuraParams},
-	slot_based::{SlotBasedBlockImport, SlotBasedBlockImportHandle},
+use cumulus_client_consensus_aura::{
+	collators::{
+		lookahead::{self as aura, Params as AuraParams},
+		slot_based::{SlotBasedBlockImport, SlotBasedBlockImportHandle},
+	},
+	equivocation_import_queue::Verifier as EquivocationVerifier,
 };
 use cumulus_client_consensus_proposer::{Proposer, ProposerInterface};
 use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
@@ -118,49 +121,28 @@ where
 		telemetry_handle: Option<TelemetryHandle>,
 		task_manager: &TaskManager,
 	) -> sc_service::error::Result<DefaultImportQueue<Block>> {
-		let verifier_client = client.clone();
-
-		let aura_verifier = cumulus_client_consensus_aura::build_verifier::<
-			<AuraId as AppCrypto>::Pair,
-			_,
-			_,
-			_,
-		>(cumulus_client_consensus_aura::BuildVerifierParams {
-			client: verifier_client.clone(),
-			create_inherent_data_providers: move |parent_hash, _| {
-				let cidp_client = verifier_client.clone();
-				async move {
-					let slot_duration = cumulus_client_consensus_aura::slot_duration_at(
-						&*cidp_client,
-						parent_hash,
-					)?;
-					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-					let slot =
-                        sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
-                            *timestamp,
-                            slot_duration,
-                        );
-
-					Ok((slot, timestamp))
-				}
-			},
-			telemetry: telemetry_handle,
-		});
+		let inherent_data_providers =
+			move |_, _| async move { Ok(sp_timestamp::InherentDataProvider::from_system_time()) };
+		let registry = config.prometheus_registry();
+		let spawner = task_manager.spawn_essential_handle();
 
 		let relay_chain_verifier =
 			Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) }));
 
+		let equivocation_aura_verifier =
+			EquivocationVerifier::<<AuraId as AppCrypto>::Pair, _, _, _>::new(
+				client.clone(),
+				inherent_data_providers,
+				telemetry_handle,
+			);
+
 		let verifier = Verifier {
 			client,
+			aura_verifier: Box::new(equivocation_aura_verifier),
 			relay_chain_verifier,
-			aura_verifier: Box::new(aura_verifier),
-			_phantom: PhantomData,
+			_phantom: Default::default(),
 		};
 
-		let registry = config.prometheus_registry();
-		let spawner = task_manager.spawn_essential_handle();
-
 		Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry))
 	}
 }
diff --git a/prdoc/pr_7195.prdoc b/prdoc/pr_7195.prdoc
new file mode 100644
index 00000000000..db4f877b156
--- /dev/null
+++ b/prdoc/pr_7195.prdoc
@@ -0,0 +1,7 @@
+title: Unify Import verifier usage across parachain template and omninode
+doc:
+- audience: Node Dev
+  description: |-
+    In polkadot-omni-node block import pipeline it uses default aura verifier without checking equivocation,
+    This Pr replaces the check with full verification with equivocation like in parachain template block import 
+crates: []
-- 
GitLab


From fd64a1e7768ba6e8676cbbf25c4e821a901c0a7f Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Wed, 22 Jan 2025 18:51:59 +0200
Subject: [PATCH 097/116] net/libp2p: Enforce outbound request-response timeout
 limits (#7222)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR enforces that outbound requests are finished within the
specified protocol timeout.

The stable2412 version running libp2p 0.52.4 contains a bug which does
not track request timeouts properly:
- https://github.com/libp2p/rust-libp2p/pull/5429

The issue has been detected while submitting libp2p -> litep2p requests
in Kusama. This aims to check that pending outbound requests have not
timed out. Although the issue has been fixed in libp2p, there might be
other cases where this may happen. For example:
- https://github.com/libp2p/rust-libp2p/pull/5417

For more context see:
https://github.com/paritytech/polkadot-sdk/issues/7076#issuecomment-2596085096


1. Ideally, the force-timeout mechanism in this PR should never be
triggered in production. However, origin/stable2412 occasionally
encounters this issue. When this happens, 2 warnings may be generated:
- one warning introduced by this PR wrt force timeout terminating the
request
- possible one warning when the libp2p decides (if at all) to provide
the response back to substrate (as mentioned by @alexggh
[here](https://github.com/paritytech/polkadot-sdk/pull/7222/files#diff-052aeaf79fef3d9a18c2cfd67006aa306b8d52e848509d9077a6a0f2eb856af7L769)
and
[here](https://github.com/paritytech/polkadot-sdk/pull/7222/files#diff-052aeaf79fef3d9a18c2cfd67006aa306b8d52e848509d9077a6a0f2eb856af7L842)

2. This implementation does not propagate to the substrate service the
`RequestFinished { error: .. }`. That event is only used internally by
substrate to increment metrics. However, we don't have the peer
information available to propagate the event properly when we
force-timeout the request. Considering this should most likely not
happen in production (origin/master) and that we'll be able to extract
information by warnings, I would say this is a good tradeoff for code
simplicity:


https://github.com/paritytech/polkadot-sdk/blob/06e3b5c6a7696048d65f1b8729f16b379a16f501/substrate/client/network/src/service.rs#L1543


### Testing

Added a new test to ensure the timeout is reached properly, even if
libp2p does not produce a response in due time.

I've also transitioned the tests to using `tokio::test` due to a
limitation of
[CI](https://github.com/paritytech/polkadot-sdk/actions/runs/12832055737/job/35784043867)

```
--- TRY 1 STDERR:        sc-network request_responses::tests::max_response_size_exceeded ---
thread 'request_responses::tests::max_response_size_exceeded' panicked at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/tokio-1.40.0/src/time/interval.rs:139:26:
there is no reactor running, must be called from the context of a Tokio 1.x runtime
```



cc @paritytech/networking

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 prdoc/pr_7222.prdoc                           |   19 +
 .../client/network/src/request_responses.rs   | 1020 ++++++++++-------
 2 files changed, 612 insertions(+), 427 deletions(-)
 create mode 100644 prdoc/pr_7222.prdoc

diff --git a/prdoc/pr_7222.prdoc b/prdoc/pr_7222.prdoc
new file mode 100644
index 00000000000..40b89b0a182
--- /dev/null
+++ b/prdoc/pr_7222.prdoc
@@ -0,0 +1,19 @@
+title: Enforce libp2p outbound request-response timeout limits
+
+doc:
+  - audience: Node Dev
+    description: |
+      This PR enforces that outbound requests are finished within the specified protocol timeout.
+      The stable2412 version running libp2p 0.52.4 contains a bug which does not track request timeouts properly
+      https://github.com/libp2p/rust-libp2p/pull/5429.
+  
+      The issue has been detected while submitting libp2p to litep2p requests in Kusama.
+      This aims to check that pending outbound requests have not timed out.
+      Although the issue has been fixed in libp2p, there might be other cases where this may happen.
+      For example, https://github.com/libp2p/rust-libp2p/pull/5417.
+
+      For more context see https://github.com/paritytech/polkadot-sdk/issues/7076#issuecomment-2596085096.
+
+crates:
+- name: sc-network
+  bump: patch
diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs
index 5fe34c78137..e21773632ed 100644
--- a/substrate/client/network/src/request_responses.rs
+++ b/substrate/client/network/src/request_responses.rs
@@ -64,6 +64,9 @@ use std::{
 
 pub use libp2p::request_response::{Config, InboundRequestId, OutboundRequestId};
 
+/// Periodically check if requests are taking too long.
+const PERIODIC_REQUEST_CHECK: Duration = Duration::from_secs(2);
+
 /// Possible failures occurring in the context of sending an outbound request and receiving the
 /// response.
 #[derive(Debug, Clone, thiserror::Error)]
@@ -251,8 +254,14 @@ pub struct OutgoingResponse {
 
 /// Information stored about a pending request.
 struct PendingRequest {
+	/// The time when the request was sent to the libp2p request-response protocol.
 	started_at: Instant,
-	response_tx: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
+	/// The channel to send the response back to the caller.
+	///
+	/// This is wrapped in an `Option` to allow for the channel to be taken out
+	/// on force-detected timeouts.
+	response_tx: Option<oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>>,
+	/// Fallback request to send if the primary request fails.
 	fallback_request: Option<(Vec<u8>, ProtocolName)>,
 }
 
@@ -336,16 +345,20 @@ impl<RequestId> From<(ProtocolName, RequestId)> for ProtocolRequestId<RequestId>
 	}
 }
 
+/// Details of a request-response protocol.
+struct ProtocolDetails {
+	behaviour: Behaviour<GenericCodec>,
+	inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
+	request_timeout: Duration,
+}
+
 /// Implementation of `NetworkBehaviour` that provides support for request-response protocols.
 pub struct RequestResponsesBehaviour {
 	/// The multiple sub-protocols, by name.
 	///
 	/// Contains the underlying libp2p request-response [`Behaviour`], plus an optional
 	/// "response builder" used to build responses for incoming requests.
-	protocols: HashMap<
-		ProtocolName,
-		(Behaviour<GenericCodec>, Option<async_channel::Sender<IncomingRequest>>),
-	>,
+	protocols: HashMap<ProtocolName, ProtocolDetails>,
 
 	/// Pending requests, passed down to a request-response [`Behaviour`], awaiting a reply.
 	pending_requests: HashMap<ProtocolRequestId<OutboundRequestId>, PendingRequest>,
@@ -365,6 +378,14 @@ pub struct RequestResponsesBehaviour {
 
 	/// Primarily used to get a reputation of a node.
 	peer_store: Arc<dyn PeerStoreProvider>,
+
+	/// Interval to check that the requests are not taking too long.
+	///
+	/// We had issues in the past where libp2p did not produce a timeout event in due time.
+	///
+	/// For more details, see:
+	/// - <https://github.com/paritytech/polkadot-sdk/issues/7076#issuecomment-2596085096>
+	periodic_request_check: tokio::time::Interval,
 }
 
 /// Generated by the response builder and waiting to be processed.
@@ -393,7 +414,7 @@ impl RequestResponsesBehaviour {
 				ProtocolSupport::Outbound
 			};
 
-			let rq_rp = Behaviour::with_codec(
+			let behaviour = Behaviour::with_codec(
 				GenericCodec {
 					max_request_size: protocol.max_request_size,
 					max_response_size: protocol.max_response_size,
@@ -405,7 +426,11 @@ impl RequestResponsesBehaviour {
 			);
 
 			match protocols.entry(protocol.name) {
-				Entry::Vacant(e) => e.insert((rq_rp, protocol.inbound_queue)),
+				Entry::Vacant(e) => e.insert(ProtocolDetails {
+					behaviour,
+					inbound_queue: protocol.inbound_queue,
+					request_timeout: protocol.request_timeout,
+				}),
 				Entry::Occupied(e) => return Err(RegisterError::DuplicateProtocol(e.key().clone())),
 			};
 		}
@@ -417,6 +442,7 @@ impl RequestResponsesBehaviour {
 			pending_responses_arrival_time: Default::default(),
 			send_feedback: Default::default(),
 			peer_store,
+			periodic_request_check: tokio::time::interval(PERIODIC_REQUEST_CHECK),
 		})
 	}
 
@@ -437,9 +463,11 @@ impl RequestResponsesBehaviour {
 	) {
 		log::trace!(target: "sub-libp2p", "send request to {target} ({protocol_name:?}), {} bytes", request.len());
 
-		if let Some((protocol, _)) = self.protocols.get_mut(protocol_name.deref()) {
+		if let Some(ProtocolDetails { behaviour, .. }) =
+			self.protocols.get_mut(protocol_name.deref())
+		{
 			Self::send_request_inner(
-				protocol,
+				behaviour,
 				&mut self.pending_requests,
 				target,
 				protocol_name,
@@ -474,7 +502,7 @@ impl RequestResponsesBehaviour {
 				(protocol_name.to_string().into(), request_id).into(),
 				PendingRequest {
 					started_at: Instant::now(),
-					response_tx: pending_response,
+					response_tx: Some(pending_response),
 					fallback_request,
 				},
 			);
@@ -521,18 +549,19 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 		local_addr: &Multiaddr,
 		remote_addr: &Multiaddr,
 	) -> Result<THandler<Self>, ConnectionDenied> {
-		let iter = self.protocols.iter_mut().filter_map(|(p, (r, _))| {
-			if let Ok(handler) = r.handle_established_inbound_connection(
-				connection_id,
-				peer,
-				local_addr,
-				remote_addr,
-			) {
-				Some((p.to_string(), handler))
-			} else {
-				None
-			}
-		});
+		let iter =
+			self.protocols.iter_mut().filter_map(|(p, ProtocolDetails { behaviour, .. })| {
+				if let Ok(handler) = behaviour.handle_established_inbound_connection(
+					connection_id,
+					peer,
+					local_addr,
+					remote_addr,
+				) {
+					Some((p.to_string(), handler))
+				} else {
+					None
+				}
+			});
 
 		Ok(MultiHandler::try_from_iter(iter).expect(
 			"Protocols are in a HashMap and there can be at most one handler per protocol name, \
@@ -548,19 +577,20 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 		role_override: Endpoint,
 		port_use: PortUse,
 	) -> Result<THandler<Self>, ConnectionDenied> {
-		let iter = self.protocols.iter_mut().filter_map(|(p, (r, _))| {
-			if let Ok(handler) = r.handle_established_outbound_connection(
-				connection_id,
-				peer,
-				addr,
-				role_override,
-				port_use,
-			) {
-				Some((p.to_string(), handler))
-			} else {
-				None
-			}
-		});
+		let iter =
+			self.protocols.iter_mut().filter_map(|(p, ProtocolDetails { behaviour, .. })| {
+				if let Ok(handler) = behaviour.handle_established_outbound_connection(
+					connection_id,
+					peer,
+					addr,
+					role_override,
+					port_use,
+				) {
+					Some((p.to_string(), handler))
+				} else {
+					None
+				}
+			});
 
 		Ok(MultiHandler::try_from_iter(iter).expect(
 			"Protocols are in a HashMap and there can be at most one handler per protocol name, \
@@ -569,8 +599,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 	}
 
 	fn on_swarm_event(&mut self, event: FromSwarm) {
-		for (protocol, _) in self.protocols.values_mut() {
-			protocol.on_swarm_event(event);
+		for ProtocolDetails { behaviour, .. } in self.protocols.values_mut() {
+			behaviour.on_swarm_event(event);
 		}
 	}
 
@@ -581,8 +611,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 		event: THandlerOutEvent<Self>,
 	) {
 		let p_name = event.0;
-		if let Some((proto, _)) = self.protocols.get_mut(p_name.as_str()) {
-			return proto.on_connection_handler_event(peer_id, connection_id, event.1)
+		if let Some(ProtocolDetails { behaviour, .. }) = self.protocols.get_mut(p_name.as_str()) {
+			return behaviour.on_connection_handler_event(peer_id, connection_id, event.1)
 		} else {
 			log::warn!(
 				target: "sub-libp2p",
@@ -594,6 +624,51 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 
 	fn poll(&mut self, cx: &mut Context) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
 		'poll_all: loop {
+			// Poll the periodic request check.
+			if self.periodic_request_check.poll_tick(cx).is_ready() {
+				self.pending_requests.retain(|id, req| {
+					let Some(ProtocolDetails { request_timeout, .. }) =
+						self.protocols.get(&id.protocol)
+					else {
+						log::warn!(
+							target: "sub-libp2p",
+							"Request {id:?} has no protocol registered.",
+						);
+
+						if let Some(response_tx) = req.response_tx.take() {
+							if response_tx.send(Err(RequestFailure::UnknownProtocol)).is_err() {
+								log::debug!(
+									target: "sub-libp2p",
+									"Request {id:?} has no protocol registered. At the same time local node is no longer interested in the result.",
+								);
+							}
+						}
+						return false
+					};
+
+					let elapsed = req.started_at.elapsed();
+					if elapsed > *request_timeout {
+						log::debug!(
+							target: "sub-libp2p",
+							"Request {id:?} force detected as timeout.",
+						);
+
+						if let Some(response_tx) = req.response_tx.take() {
+							if response_tx.send(Err(RequestFailure::Network(OutboundFailure::Timeout))).is_err() {
+								log::debug!(
+									target: "sub-libp2p",
+									"Request {id:?} force detected as timeout. At the same time local node is no longer interested in the result.",
+								);
+							}
+						}
+
+						false
+					} else {
+						true
+					}
+				});
+			}
+
 			// Poll to see if any response is ready to be sent back.
 			while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) {
 				let RequestProcessingOutcome {
@@ -610,10 +685,12 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 				};
 
 				if let Ok(payload) = result {
-					if let Some((protocol, _)) = self.protocols.get_mut(&*protocol_name) {
+					if let Some(ProtocolDetails { behaviour, .. }) =
+						self.protocols.get_mut(&*protocol_name)
+					{
 						log::trace!(target: "sub-libp2p", "send response to {peer} ({protocol_name:?}), {} bytes", payload.len());
 
-						if protocol.send_response(inner_channel, Ok(payload)).is_err() {
+						if behaviour.send_response(inner_channel, Ok(payload)).is_err() {
 							// Note: Failure is handled further below when receiving
 							// `InboundFailure` event from request-response [`Behaviour`].
 							log::debug!(
@@ -641,7 +718,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 			let mut fallback_requests = vec![];
 
 			// Poll request-responses protocols.
-			for (protocol, (ref mut behaviour, ref mut resp_builder)) in &mut self.protocols {
+			for (protocol, ProtocolDetails { behaviour, inbound_queue, .. }) in &mut self.protocols
+			{
 				'poll_protocol: while let Poll::Ready(ev) = behaviour.poll(cx) {
 					let ev = match ev {
 						// Main events we are interested in.
@@ -696,7 +774,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 
 							// Submit the request to the "response builder" passed by the user at
 							// initialization.
-							if let Some(resp_builder) = resp_builder {
+							if let Some(resp_builder) = inbound_queue {
 								// If the response builder is too busy, silently drop `tx`. This
 								// will be reported by the corresponding request-response
 								// [`Behaviour`] through an `InboundFailure::Omission` event.
@@ -744,7 +822,11 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 								.pending_requests
 								.remove(&(protocol.clone(), request_id).into())
 							{
-								Some(PendingRequest { started_at, response_tx, .. }) => {
+								Some(PendingRequest {
+									started_at,
+									response_tx: Some(response_tx),
+									..
+								}) => {
 									log::trace!(
 										target: "sub-libp2p",
 										"received response from {peer} ({protocol:?}), {} bytes",
@@ -760,13 +842,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 										.map_err(|_| RequestFailure::Obsolete);
 									(started_at, delivered)
 								},
-								None => {
-									log::warn!(
+								_ => {
+									log::debug!(
 										target: "sub-libp2p",
-										"Received `RequestResponseEvent::Message` with unexpected request id {:?}",
+										"Received `RequestResponseEvent::Message` with unexpected request id {:?} from {:?}",
 										request_id,
+										peer,
 									);
-									debug_assert!(false);
 									continue
 								},
 							};
@@ -795,7 +877,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 							{
 								Some(PendingRequest {
 									started_at,
-									response_tx,
+									response_tx: Some(response_tx),
 									fallback_request,
 								}) => {
 									// Try using the fallback request if the protocol was not
@@ -833,13 +915,14 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 									}
 									started_at
 								},
-								None => {
-									log::warn!(
+								_ => {
+									log::debug!(
 										target: "sub-libp2p",
-										"Received `RequestResponseEvent::Message` with unexpected request id {:?}",
+										"Received `RequestResponseEvent::OutboundFailure` with unexpected request id {:?} error {:?} from {:?}",
 										request_id,
+										error,
+										peer
 									);
-									debug_assert!(false);
 									continue
 								},
 							};
@@ -904,7 +987,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
 
 			// Send out fallback requests.
 			for (peer, protocol, request, pending_response) in fallback_requests.drain(..) {
-				if let Some((behaviour, _)) = self.protocols.get_mut(&protocol) {
+				if let Some(ProtocolDetails { behaviour, .. }) = self.protocols.get_mut(&protocol) {
 					Self::send_request_inner(
 						behaviour,
 						&mut self.pending_requests,
@@ -1073,7 +1156,7 @@ mod tests {
 
 	use crate::mock::MockPeerStore;
 	use assert_matches::assert_matches;
-	use futures::{channel::oneshot, executor::LocalPool, task::Spawn};
+	use futures::channel::oneshot;
 	use libp2p::{
 		core::{
 			transport::{MemoryTransport, Transport},
@@ -1086,10 +1169,10 @@ mod tests {
 	};
 	use std::{iter, time::Duration};
 
-	struct TokioExecutor(tokio::runtime::Runtime);
+	struct TokioExecutor;
 	impl Executor for TokioExecutor {
 		fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
-			let _ = self.0.spawn(f);
+			tokio::spawn(f);
 		}
 	}
 
@@ -1106,13 +1189,11 @@ mod tests {
 
 		let behaviour = RequestResponsesBehaviour::new(list, Arc::new(MockPeerStore {})).unwrap();
 
-		let runtime = tokio::runtime::Runtime::new().unwrap();
-
 		let mut swarm = Swarm::new(
 			transport,
 			behaviour,
 			keypair.public().to_peer_id(),
-			SwarmConfig::with_executor(TokioExecutor(runtime))
+			SwarmConfig::with_executor(TokioExecutor {})
 				// This is taken care of by notification protocols in non-test environment
 				// It is very slow in test environment for some reason, hence larger timeout
 				.with_idle_connection_timeout(Duration::from_secs(10)),
@@ -1125,34 +1206,27 @@ mod tests {
 		(swarm, listen_addr)
 	}
 
-	#[test]
-	fn basic_request_response_works() {
+	#[tokio::test]
+	async fn basic_request_response_works() {
 		let protocol_name = ProtocolName::from("/test/req-resp/1");
-		let mut pool = LocalPool::new();
 
 		// Build swarms whose behaviour is [`RequestResponsesBehaviour`].
 		let mut swarms = (0..2)
 			.map(|_| {
 				let (tx, mut rx) = async_channel::bounded::<IncomingRequest>(64);
 
-				pool.spawner()
-					.spawn_obj(
-						async move {
-							while let Some(rq) = rx.next().await {
-								let (fb_tx, fb_rx) = oneshot::channel();
-								assert_eq!(rq.payload, b"this is a request");
-								let _ = rq.pending_response.send(super::OutgoingResponse {
-									result: Ok(b"this is a response".to_vec()),
-									reputation_changes: Vec::new(),
-									sent_feedback: Some(fb_tx),
-								});
-								fb_rx.await.unwrap();
-							}
-						}
-						.boxed()
-						.into(),
-					)
-					.unwrap();
+				tokio::spawn(async move {
+					while let Some(rq) = rx.next().await {
+						let (fb_tx, fb_rx) = oneshot::channel();
+						assert_eq!(rq.payload, b"this is a request");
+						let _ = rq.pending_response.send(super::OutgoingResponse {
+							result: Ok(b"this is a response".to_vec()),
+							reputation_changes: Vec::new(),
+							sent_feedback: Some(fb_tx),
+						});
+						fb_rx.await.unwrap();
+					}
+				});
 
 				let protocol_config = ProtocolConfig {
 					name: protocol_name.clone(),
@@ -1176,84 +1250,69 @@ mod tests {
 
 		let (mut swarm, _) = swarms.remove(0);
 		// Running `swarm[0]` in the background.
-		pool.spawner()
-			.spawn_obj({
-				async move {
-					loop {
-						match swarm.select_next_some().await {
-							SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => {
-								result.unwrap();
-							},
-							_ => {},
-						}
-					}
-				}
-				.boxed()
-				.into()
-			})
-			.unwrap();
-
-		// Remove and run the remaining swarm.
-		let (mut swarm, _) = swarms.remove(0);
-		pool.run_until(async move {
-			let mut response_receiver = None;
-
+		tokio::spawn(async move {
 			loop {
 				match swarm.select_next_some().await {
-					SwarmEvent::ConnectionEstablished { peer_id, .. } => {
-						let (sender, receiver) = oneshot::channel();
-						swarm.behaviour_mut().send_request(
-							&peer_id,
-							protocol_name.clone(),
-							b"this is a request".to_vec(),
-							None,
-							sender,
-							IfDisconnected::ImmediateError,
-						);
-						assert!(response_receiver.is_none());
-						response_receiver = Some(receiver);
-					},
-					SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => {
 						result.unwrap();
-						break
 					},
 					_ => {},
 				}
 			}
-
-			assert_eq!(
-				response_receiver.unwrap().await.unwrap().unwrap(),
-				(b"this is a response".to_vec(), protocol_name)
-			);
 		});
+
+		// Remove and run the remaining swarm.
+		let (mut swarm, _) = swarms.remove(0);
+		let mut response_receiver = None;
+
+		loop {
+			match swarm.select_next_some().await {
+				SwarmEvent::ConnectionEstablished { peer_id, .. } => {
+					let (sender, receiver) = oneshot::channel();
+					swarm.behaviour_mut().send_request(
+						&peer_id,
+						protocol_name.clone(),
+						b"this is a request".to_vec(),
+						None,
+						sender,
+						IfDisconnected::ImmediateError,
+					);
+					assert!(response_receiver.is_none());
+					response_receiver = Some(receiver);
+				},
+				SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					result.unwrap();
+					break
+				},
+				_ => {},
+			}
+		}
+
+		assert_eq!(
+			response_receiver.unwrap().await.unwrap().unwrap(),
+			(b"this is a response".to_vec(), protocol_name)
+		);
 	}
 
-	#[test]
-	fn max_response_size_exceeded() {
+	#[tokio::test]
+	async fn max_response_size_exceeded() {
 		let protocol_name = ProtocolName::from("/test/req-resp/1");
-		let mut pool = LocalPool::new();
 
 		// Build swarms whose behaviour is [`RequestResponsesBehaviour`].
 		let mut swarms = (0..2)
 			.map(|_| {
 				let (tx, mut rx) = async_channel::bounded::<IncomingRequest>(64);
 
-				pool.spawner()
-					.spawn_obj(
-						async move {
-							while let Some(rq) = rx.next().await {
-								assert_eq!(rq.payload, b"this is a request");
-								let _ = rq.pending_response.send(super::OutgoingResponse {
-									result: Ok(b"this response exceeds the limit".to_vec()),
-									reputation_changes: Vec::new(),
-									sent_feedback: None,
-								});
-							}
-						}
-						.boxed()
-						.into(),
-					)
-					.unwrap();
+				tokio::spawn(async move {
+					while let Some(rq) = rx.next().await {
+						assert_eq!(rq.payload, b"this is a request");
+						let _ = rq.pending_response.send(super::OutgoingResponse {
+							result: Ok(b"this response exceeds the limit".to_vec()),
+							reputation_changes: Vec::new(),
+							sent_feedback: None,
+						});
+					}
+				});
 
 				let protocol_config = ProtocolConfig {
 					name: protocol_name.clone(),
@@ -1278,59 +1337,52 @@ mod tests {
 		// Running `swarm[0]` in the background until a `InboundRequest` event happens,
 		// which is a hint about the test having ended.
 		let (mut swarm, _) = swarms.remove(0);
-		pool.spawner()
-			.spawn_obj({
-				async move {
-					loop {
-						match swarm.select_next_some().await {
-							SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => {
-								assert!(result.is_ok());
-							},
-							SwarmEvent::ConnectionClosed { .. } => {
-								break;
-							},
-							_ => {},
-						}
-					}
-				}
-				.boxed()
-				.into()
-			})
-			.unwrap();
-
-		// Remove and run the remaining swarm.
-		let (mut swarm, _) = swarms.remove(0);
-		pool.run_until(async move {
-			let mut response_receiver = None;
-
+		tokio::spawn(async move {
 			loop {
 				match swarm.select_next_some().await {
-					SwarmEvent::ConnectionEstablished { peer_id, .. } => {
-						let (sender, receiver) = oneshot::channel();
-						swarm.behaviour_mut().send_request(
-							&peer_id,
-							protocol_name.clone(),
-							b"this is a request".to_vec(),
-							None,
-							sender,
-							IfDisconnected::ImmediateError,
-						);
-						assert!(response_receiver.is_none());
-						response_receiver = Some(receiver);
+					SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => {
+						assert!(result.is_ok());
 					},
-					SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
-						assert!(result.is_err());
-						break
+					SwarmEvent::ConnectionClosed { .. } => {
+						break;
 					},
 					_ => {},
 				}
 			}
+		});
+
+		// Remove and run the remaining swarm.
+		let (mut swarm, _) = swarms.remove(0);
+
+		let mut response_receiver = None;
 
-			match response_receiver.unwrap().await.unwrap().unwrap_err() {
-				RequestFailure::Network(OutboundFailure::Io(_)) => {},
-				request_failure => panic!("Unexpected failure: {request_failure:?}"),
+		loop {
+			match swarm.select_next_some().await {
+				SwarmEvent::ConnectionEstablished { peer_id, .. } => {
+					let (sender, receiver) = oneshot::channel();
+					swarm.behaviour_mut().send_request(
+						&peer_id,
+						protocol_name.clone(),
+						b"this is a request".to_vec(),
+						None,
+						sender,
+						IfDisconnected::ImmediateError,
+					);
+					assert!(response_receiver.is_none());
+					response_receiver = Some(receiver);
+				},
+				SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					assert!(result.is_err());
+					break
+				},
+				_ => {},
 			}
-		});
+		}
+
+		match response_receiver.unwrap().await.unwrap().unwrap_err() {
+			RequestFailure::Network(OutboundFailure::Io(_)) => {},
+			request_failure => panic!("Unexpected failure: {request_failure:?}"),
+		}
 	}
 
 	/// A `RequestId` is a unique identifier among either all inbound or all outbound requests for
@@ -1343,11 +1395,10 @@ mod tests {
 	/// without a `RequestId` collision.
 	///
 	/// See [`ProtocolRequestId`] for additional information.
-	#[test]
-	fn request_id_collision() {
+	#[tokio::test]
+	async fn request_id_collision() {
 		let protocol_name_1 = ProtocolName::from("/test/req-resp-1/1");
 		let protocol_name_2 = ProtocolName::from("/test/req-resp-2/1");
-		let mut pool = LocalPool::new();
 
 		let mut swarm_1 = {
 			let protocol_configs = vec![
@@ -1405,114 +1456,100 @@ mod tests {
 		swarm_1.dial(listen_add_2).unwrap();
 
 		// Run swarm 2 in the background, receiving two requests.
-		pool.spawner()
-			.spawn_obj(
-				async move {
-					loop {
-						match swarm_2.select_next_some().await {
-							SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => {
-								result.unwrap();
-							},
-							_ => {},
-						}
-					}
+		tokio::spawn(async move {
+			loop {
+				match swarm_2.select_next_some().await {
+					SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => {
+						result.unwrap();
+					},
+					_ => {},
 				}
-				.boxed()
-				.into(),
-			)
-			.unwrap();
+			}
+		});
 
 		// Handle both requests sent by swarm 1 to swarm 2 in the background.
 		//
 		// Make sure both requests overlap, by answering the first only after receiving the
 		// second.
-		pool.spawner()
-			.spawn_obj(
-				async move {
-					let protocol_1_request = swarm_2_handler_1.next().await;
-					let protocol_2_request = swarm_2_handler_2.next().await;
-
-					protocol_1_request
-						.unwrap()
-						.pending_response
-						.send(OutgoingResponse {
-							result: Ok(b"this is a response".to_vec()),
-							reputation_changes: Vec::new(),
-							sent_feedback: None,
-						})
-						.unwrap();
-					protocol_2_request
-						.unwrap()
-						.pending_response
-						.send(OutgoingResponse {
-							result: Ok(b"this is a response".to_vec()),
-							reputation_changes: Vec::new(),
-							sent_feedback: None,
-						})
-						.unwrap();
-				}
-				.boxed()
-				.into(),
-			)
-			.unwrap();
+		tokio::spawn(async move {
+			let protocol_1_request = swarm_2_handler_1.next().await;
+			let protocol_2_request = swarm_2_handler_2.next().await;
+
+			protocol_1_request
+				.unwrap()
+				.pending_response
+				.send(OutgoingResponse {
+					result: Ok(b"this is a response".to_vec()),
+					reputation_changes: Vec::new(),
+					sent_feedback: None,
+				})
+				.unwrap();
+			protocol_2_request
+				.unwrap()
+				.pending_response
+				.send(OutgoingResponse {
+					result: Ok(b"this is a response".to_vec()),
+					reputation_changes: Vec::new(),
+					sent_feedback: None,
+				})
+				.unwrap();
+		});
 
 		// Have swarm 1 send two requests to swarm 2 and await responses.
-		pool.run_until(async move {
-			let mut response_receivers = None;
-			let mut num_responses = 0;
 
-			loop {
-				match swarm_1.select_next_some().await {
-					SwarmEvent::ConnectionEstablished { peer_id, .. } => {
-						let (sender_1, receiver_1) = oneshot::channel();
-						let (sender_2, receiver_2) = oneshot::channel();
-						swarm_1.behaviour_mut().send_request(
-							&peer_id,
-							protocol_name_1.clone(),
-							b"this is a request".to_vec(),
-							None,
-							sender_1,
-							IfDisconnected::ImmediateError,
-						);
-						swarm_1.behaviour_mut().send_request(
-							&peer_id,
-							protocol_name_2.clone(),
-							b"this is a request".to_vec(),
-							None,
-							sender_2,
-							IfDisconnected::ImmediateError,
-						);
-						assert!(response_receivers.is_none());
-						response_receivers = Some((receiver_1, receiver_2));
-					},
-					SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
-						num_responses += 1;
-						result.unwrap();
-						if num_responses == 2 {
-							break
-						}
-					},
-					_ => {},
-				}
+		let mut response_receivers = None;
+		let mut num_responses = 0;
+
+		loop {
+			match swarm_1.select_next_some().await {
+				SwarmEvent::ConnectionEstablished { peer_id, .. } => {
+					let (sender_1, receiver_1) = oneshot::channel();
+					let (sender_2, receiver_2) = oneshot::channel();
+					swarm_1.behaviour_mut().send_request(
+						&peer_id,
+						protocol_name_1.clone(),
+						b"this is a request".to_vec(),
+						None,
+						sender_1,
+						IfDisconnected::ImmediateError,
+					);
+					swarm_1.behaviour_mut().send_request(
+						&peer_id,
+						protocol_name_2.clone(),
+						b"this is a request".to_vec(),
+						None,
+						sender_2,
+						IfDisconnected::ImmediateError,
+					);
+					assert!(response_receivers.is_none());
+					response_receivers = Some((receiver_1, receiver_2));
+				},
+				SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					num_responses += 1;
+					result.unwrap();
+					if num_responses == 2 {
+						break
+					}
+				},
+				_ => {},
 			}
-			let (response_receiver_1, response_receiver_2) = response_receivers.unwrap();
-			assert_eq!(
-				response_receiver_1.await.unwrap().unwrap(),
-				(b"this is a response".to_vec(), protocol_name_1)
-			);
-			assert_eq!(
-				response_receiver_2.await.unwrap().unwrap(),
-				(b"this is a response".to_vec(), protocol_name_2)
-			);
-		});
+		}
+		let (response_receiver_1, response_receiver_2) = response_receivers.unwrap();
+		assert_eq!(
+			response_receiver_1.await.unwrap().unwrap(),
+			(b"this is a response".to_vec(), protocol_name_1)
+		);
+		assert_eq!(
+			response_receiver_2.await.unwrap().unwrap(),
+			(b"this is a response".to_vec(), protocol_name_2)
+		);
 	}
 
-	#[test]
-	fn request_fallback() {
+	#[tokio::test]
+	async fn request_fallback() {
 		let protocol_name_1 = ProtocolName::from("/test/req-resp/2");
 		let protocol_name_1_fallback = ProtocolName::from("/test/req-resp/1");
 		let protocol_name_2 = ProtocolName::from("/test/another");
-		let mut pool = LocalPool::new();
 
 		let protocol_config_1 = ProtocolConfig {
 			name: protocol_name_1.clone(),
@@ -1550,39 +1587,31 @@ mod tests {
 			let mut protocol_config_2 = protocol_config_2.clone();
 			protocol_config_2.inbound_queue = Some(tx_2);
 
-			pool.spawner()
-				.spawn_obj(
-					async move {
-						for _ in 0..2 {
-							if let Some(rq) = rx_1.next().await {
-								let (fb_tx, fb_rx) = oneshot::channel();
-								assert_eq!(rq.payload, b"request on protocol /test/req-resp/1");
-								let _ = rq.pending_response.send(super::OutgoingResponse {
-									result: Ok(
-										b"this is a response on protocol /test/req-resp/1".to_vec()
-									),
-									reputation_changes: Vec::new(),
-									sent_feedback: Some(fb_tx),
-								});
-								fb_rx.await.unwrap();
-							}
-						}
-
-						if let Some(rq) = rx_2.next().await {
-							let (fb_tx, fb_rx) = oneshot::channel();
-							assert_eq!(rq.payload, b"request on protocol /test/other");
-							let _ = rq.pending_response.send(super::OutgoingResponse {
-								result: Ok(b"this is a response on protocol /test/other".to_vec()),
-								reputation_changes: Vec::new(),
-								sent_feedback: Some(fb_tx),
-							});
-							fb_rx.await.unwrap();
-						}
+			tokio::spawn(async move {
+				for _ in 0..2 {
+					if let Some(rq) = rx_1.next().await {
+						let (fb_tx, fb_rx) = oneshot::channel();
+						assert_eq!(rq.payload, b"request on protocol /test/req-resp/1");
+						let _ = rq.pending_response.send(super::OutgoingResponse {
+							result: Ok(b"this is a response on protocol /test/req-resp/1".to_vec()),
+							reputation_changes: Vec::new(),
+							sent_feedback: Some(fb_tx),
+						});
+						fb_rx.await.unwrap();
 					}
-					.boxed()
-					.into(),
-				)
-				.unwrap();
+				}
+
+				if let Some(rq) = rx_2.next().await {
+					let (fb_tx, fb_rx) = oneshot::channel();
+					assert_eq!(rq.payload, b"request on protocol /test/other");
+					let _ = rq.pending_response.send(super::OutgoingResponse {
+						result: Ok(b"this is a response on protocol /test/other".to_vec()),
+						reputation_changes: Vec::new(),
+						sent_feedback: Some(fb_tx),
+					});
+					fb_rx.await.unwrap();
+				}
+			});
 
 			build_swarm(vec![protocol_config_1_fallback, protocol_config_2].into_iter())
 		};
@@ -1603,132 +1632,269 @@ mod tests {
 		}
 
 		// Running `older_swarm`` in the background.
-		pool.spawner()
-			.spawn_obj({
-				async move {
-					loop {
-						_ = older_swarm.0.select_next_some().await;
-					}
-				}
-				.boxed()
-				.into()
-			})
-			.unwrap();
+		tokio::spawn(async move {
+			loop {
+				_ = older_swarm.0.select_next_some().await;
+			}
+		});
 
 		// Run the newer swarm. Attempt to make requests on all protocols.
 		let (mut swarm, _) = new_swarm;
 		let mut older_peer_id = None;
 
-		pool.run_until(async move {
-			let mut response_receiver = None;
-			// Try the new protocol with a fallback.
-			loop {
-				match swarm.select_next_some().await {
-					SwarmEvent::ConnectionEstablished { peer_id, .. } => {
-						older_peer_id = Some(peer_id);
-						let (sender, receiver) = oneshot::channel();
-						swarm.behaviour_mut().send_request(
-							&peer_id,
-							protocol_name_1.clone(),
-							b"request on protocol /test/req-resp/2".to_vec(),
-							Some((
-								b"request on protocol /test/req-resp/1".to_vec(),
-								protocol_config_1_fallback.name.clone(),
-							)),
-							sender,
-							IfDisconnected::ImmediateError,
-						);
-						response_receiver = Some(receiver);
-					},
-					SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
-						result.unwrap();
-						break
-					},
-					_ => {},
-				}
+		let mut response_receiver = None;
+		// Try the new protocol with a fallback.
+		loop {
+			match swarm.select_next_some().await {
+				SwarmEvent::ConnectionEstablished { peer_id, .. } => {
+					older_peer_id = Some(peer_id);
+					let (sender, receiver) = oneshot::channel();
+					swarm.behaviour_mut().send_request(
+						&peer_id,
+						protocol_name_1.clone(),
+						b"request on protocol /test/req-resp/2".to_vec(),
+						Some((
+							b"request on protocol /test/req-resp/1".to_vec(),
+							protocol_config_1_fallback.name.clone(),
+						)),
+						sender,
+						IfDisconnected::ImmediateError,
+					);
+					response_receiver = Some(receiver);
+				},
+				SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					result.unwrap();
+					break
+				},
+				_ => {},
 			}
-			assert_eq!(
-				response_receiver.unwrap().await.unwrap().unwrap(),
-				(
-					b"this is a response on protocol /test/req-resp/1".to_vec(),
-					protocol_name_1_fallback.clone()
-				)
-			);
-			// Try the old protocol with a useless fallback.
-			let (sender, response_receiver) = oneshot::channel();
-			swarm.behaviour_mut().send_request(
-				older_peer_id.as_ref().unwrap(),
-				protocol_name_1_fallback.clone(),
-				b"request on protocol /test/req-resp/1".to_vec(),
-				Some((
-					b"dummy request, will fail if processed".to_vec(),
-					protocol_config_1_fallback.name.clone(),
-				)),
-				sender,
-				IfDisconnected::ImmediateError,
-			);
-			loop {
-				match swarm.select_next_some().await {
-					SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
-						result.unwrap();
-						break
-					},
-					_ => {},
-				}
+		}
+		assert_eq!(
+			response_receiver.unwrap().await.unwrap().unwrap(),
+			(
+				b"this is a response on protocol /test/req-resp/1".to_vec(),
+				protocol_name_1_fallback.clone()
+			)
+		);
+		// Try the old protocol with a useless fallback.
+		let (sender, response_receiver) = oneshot::channel();
+		swarm.behaviour_mut().send_request(
+			older_peer_id.as_ref().unwrap(),
+			protocol_name_1_fallback.clone(),
+			b"request on protocol /test/req-resp/1".to_vec(),
+			Some((
+				b"dummy request, will fail if processed".to_vec(),
+				protocol_config_1_fallback.name.clone(),
+			)),
+			sender,
+			IfDisconnected::ImmediateError,
+		);
+		loop {
+			match swarm.select_next_some().await {
+				SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					result.unwrap();
+					break
+				},
+				_ => {},
 			}
-			assert_eq!(
-				response_receiver.await.unwrap().unwrap(),
-				(
-					b"this is a response on protocol /test/req-resp/1".to_vec(),
-					protocol_name_1_fallback.clone()
-				)
-			);
-			// Try the new protocol with no fallback. Should fail.
-			let (sender, response_receiver) = oneshot::channel();
-			swarm.behaviour_mut().send_request(
-				older_peer_id.as_ref().unwrap(),
-				protocol_name_1.clone(),
-				b"request on protocol /test/req-resp-2".to_vec(),
-				None,
-				sender,
-				IfDisconnected::ImmediateError,
-			);
-			loop {
-				match swarm.select_next_some().await {
-					SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
-						assert_matches!(
-							result.unwrap_err(),
-							RequestFailure::Network(OutboundFailure::UnsupportedProtocols)
-						);
-						break
-					},
-					_ => {},
-				}
+		}
+		assert_eq!(
+			response_receiver.await.unwrap().unwrap(),
+			(
+				b"this is a response on protocol /test/req-resp/1".to_vec(),
+				protocol_name_1_fallback.clone()
+			)
+		);
+		// Try the new protocol with no fallback. Should fail.
+		let (sender, response_receiver) = oneshot::channel();
+		swarm.behaviour_mut().send_request(
+			older_peer_id.as_ref().unwrap(),
+			protocol_name_1.clone(),
+			b"request on protocol /test/req-resp-2".to_vec(),
+			None,
+			sender,
+			IfDisconnected::ImmediateError,
+		);
+		loop {
+			match swarm.select_next_some().await {
+				SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					assert_matches!(
+						result.unwrap_err(),
+						RequestFailure::Network(OutboundFailure::UnsupportedProtocols)
+					);
+					break
+				},
+				_ => {},
 			}
-			assert!(response_receiver.await.unwrap().is_err());
-			// Try the other protocol with no fallback.
-			let (sender, response_receiver) = oneshot::channel();
-			swarm.behaviour_mut().send_request(
-				older_peer_id.as_ref().unwrap(),
-				protocol_name_2.clone(),
-				b"request on protocol /test/other".to_vec(),
-				None,
-				sender,
-				IfDisconnected::ImmediateError,
-			);
+		}
+		assert!(response_receiver.await.unwrap().is_err());
+		// Try the other protocol with no fallback.
+		let (sender, response_receiver) = oneshot::channel();
+		swarm.behaviour_mut().send_request(
+			older_peer_id.as_ref().unwrap(),
+			protocol_name_2.clone(),
+			b"request on protocol /test/other".to_vec(),
+			None,
+			sender,
+			IfDisconnected::ImmediateError,
+		);
+		loop {
+			match swarm.select_next_some().await {
+				SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					result.unwrap();
+					break
+				},
+				_ => {},
+			}
+		}
+		assert_eq!(
+			response_receiver.await.unwrap().unwrap(),
+			(b"this is a response on protocol /test/other".to_vec(), protocol_name_2.clone())
+		);
+	}
+
+	/// This test ensures the `RequestResponsesBehaviour` propagates back the Request::Timeout error
+	/// even if the libp2p component hangs.
+	///
+	/// For testing purposes, the communication happens on the `/test/req-resp/1` protocol.
+	///
+	/// This is achieved by:
+	/// - Two swarms are connected, the first one is slow to respond and has the timeout set to 10
+	///   seconds. The second swarm is configured with a timeout of 10 seconds in libp2p, however in
+	///   substrate this is set to 1 second.
+	///
+	/// - The first swarm introduces a delay of 2 seconds before responding to the request.
+	///
+	/// - The second swarm must enforce the 1 second timeout.
+	#[tokio::test]
+	async fn enforce_outbound_timeouts() {
+		const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
+		const REQUEST_TIMEOUT_SHORT: Duration = Duration::from_secs(1);
+
+		// These swarms only speak `protocol_name`.
+		let protocol_name = ProtocolName::from("/test/req-resp/1");
+
+		let protocol_config = ProtocolConfig {
+			name: protocol_name.clone(),
+			fallback_names: Vec::new(),
+			max_request_size: 1024,
+			max_response_size: 1024 * 1024,
+			request_timeout: REQUEST_TIMEOUT, // <-- important for the test
+			inbound_queue: None,
+		};
+
+		// Build swarms whose behaviour is [`RequestResponsesBehaviour`].
+		let (mut first_swarm, _) = {
+			let (tx, mut rx) = async_channel::bounded::<IncomingRequest>(64);
+
+			tokio::spawn(async move {
+				if let Some(rq) = rx.next().await {
+					assert_eq!(rq.payload, b"this is a request");
+
+					// Sleep for more than `REQUEST_TIMEOUT_SHORT` and less than
+					// `REQUEST_TIMEOUT`.
+					tokio::time::sleep(REQUEST_TIMEOUT_SHORT * 2).await;
+
+					// By the time this response is sent back, the second
+					// swarm will already have timed out.
+					let _ = rq.pending_response.send(super::OutgoingResponse {
+						result: Ok(b"Second swarm already timedout".to_vec()),
+						reputation_changes: Vec::new(),
+						sent_feedback: None,
+					});
+				}
+			});
+
+			let mut protocol_config = protocol_config.clone();
+			protocol_config.inbound_queue = Some(tx);
+
+			build_swarm(iter::once(protocol_config))
+		};
+
+		let (mut second_swarm, second_address) = {
+			let (tx, mut rx) = async_channel::bounded::<IncomingRequest>(64);
+
+			tokio::spawn(async move {
+				while let Some(rq) = rx.next().await {
+					let _ = rq.pending_response.send(super::OutgoingResponse {
+						result: Ok(b"This is the response".to_vec()),
+						reputation_changes: Vec::new(),
+						sent_feedback: None,
+					});
+				}
+			});
+			let mut protocol_config = protocol_config.clone();
+			protocol_config.inbound_queue = Some(tx);
+
+			build_swarm(iter::once(protocol_config.clone()))
+		};
+		// Modify the second swarm to have a shorter timeout.
+		second_swarm
+			.behaviour_mut()
+			.protocols
+			.get_mut(&protocol_name)
+			.unwrap()
+			.request_timeout = REQUEST_TIMEOUT_SHORT;
+
+		// Ask first swarm to dial the second swarm.
+		{
+			Swarm::dial(&mut first_swarm, second_address).unwrap();
+		}
+
+		// Run the first swarm in the background until an `InboundRequest` event happens,
+		// which is a hint that the test has ended.
+		tokio::spawn(async move {
 			loop {
-				match swarm.select_next_some().await {
-					SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
-						result.unwrap();
-						break
+				let event = first_swarm.select_next_some().await;
+				match event {
+					SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => {
+						assert!(result.is_ok());
+						break;
+					},
+					SwarmEvent::ConnectionClosed { .. } => {
+						break;
 					},
 					_ => {},
 				}
 			}
-			assert_eq!(
-				response_receiver.await.unwrap().unwrap(),
-				(b"this is a response on protocol /test/other".to_vec(), protocol_name_2.clone())
-			);
 		});
+
+		// Run the second swarm.
+		// - on connection established send the request to the first swarm
+		// - expect to receive a timeout
+		let mut response_receiver = None;
+		loop {
+			let event = second_swarm.select_next_some().await;
+
+			match event {
+				SwarmEvent::ConnectionEstablished { peer_id, .. } => {
+					let (sender, receiver) = oneshot::channel();
+					second_swarm.behaviour_mut().send_request(
+						&peer_id,
+						protocol_name.clone(),
+						b"this is a request".to_vec(),
+						None,
+						sender,
+						IfDisconnected::ImmediateError,
+					);
+					assert!(response_receiver.is_none());
+					response_receiver = Some(receiver);
+				},
+				SwarmEvent::ConnectionClosed { .. } => {
+					break;
+				},
+				SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => {
+					assert!(result.is_err());
+					break
+				},
+				_ => {},
+			}
+		}
+
+		// Expect the timeout.
+		match response_receiver.unwrap().await.unwrap().unwrap_err() {
+			RequestFailure::Network(OutboundFailure::Timeout) => {},
+			request_failure => panic!("Unexpected failure: {request_failure:?}"),
+		}
 	}
 }
-- 
GitLab


From 89b022842c7ab922de5bf026cd45e43b9cd8c654 Mon Sep 17 00:00:00 2001
From: FereMouSiopi <FereMouSiopi@proton.me>
Date: Wed, 22 Jan 2025 10:08:59 -0800
Subject: [PATCH 098/116] Migrate `pallet-insecure-randomness-collective-flip`
 to umbrella crate (#6738)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Part of https://github.com/paritytech/polkadot-sdk/issues/6504

---------

Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 Cargo.lock                                    |  6 +---
 .../Cargo.toml                                | 18 ++----------
 .../src/lib.rs                                | 28 ++++++-------------
 3 files changed, 13 insertions(+), 39 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 7e41b7e9937..a10def370be 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13925,14 +13925,10 @@ dependencies = [
 name = "pallet-insecure-randomness-collective-flip"
 version = "16.0.0"
 dependencies = [
- "frame-support 28.0.0",
- "frame-system 28.0.0",
  "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
  "safe-mix",
  "scale-info",
- "sp-core 28.0.0",
- "sp-io 30.0.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]
diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
index 1682b52dfbf..789f130423a 100644
--- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
+++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml
@@ -17,30 +17,18 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { features = ["derive"], workspace = true }
-frame-support = { workspace = true }
-frame-system = { workspace = true }
+frame = { workspace = true, features = ["runtime"] }
 safe-mix = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
-sp-runtime = { workspace = true }
-
-[dev-dependencies]
-sp-core = { workspace = true, default-features = true }
-sp-io = { workspace = true, default-features = true }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
-	"frame-support/std",
-	"frame-system/std",
+	"frame/std",
 	"safe-mix/std",
 	"scale-info/std",
-	"sp-core/std",
-	"sp-io/std",
-	"sp-runtime/std",
 ]
 try-runtime = [
-	"frame-support/try-runtime",
-	"frame-system/try-runtime",
-	"sp-runtime/try-runtime",
+	"frame/try-runtime",
 ]
diff --git a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs
index b605b4d0858..0e7e8001d5d 100644
--- a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs
+++ b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs
@@ -42,13 +42,11 @@
 //! ### Example - Get random seed for the current block
 //!
 //! ```
-//! use frame_support::traits::Randomness;
+//! use frame::{prelude::*, traits::Randomness};
 //!
-//! #[frame_support::pallet]
+//! #[frame::pallet]
 //! pub mod pallet {
 //!     use super::*;
-//!     use frame_support::pallet_prelude::*;
-//!     use frame_system::pallet_prelude::*;
 //!
 //!     #[pallet::pallet]
 //!     pub struct Pallet<T>(_);
@@ -73,9 +71,7 @@
 use safe_mix::TripletMix;
 
 use codec::Encode;
-use frame_support::{pallet_prelude::Weight, traits::Randomness};
-use frame_system::pallet_prelude::BlockNumberFor;
-use sp_runtime::traits::{Hash, Saturating};
+use frame::{prelude::*, traits::Randomness};
 
 const RANDOM_MATERIAL_LEN: u32 = 81;
 
@@ -87,10 +83,9 @@ fn block_number_to_index<T: Config>(block_number: BlockNumberFor<T>) -> usize {
 
 pub use pallet::*;
 
-#[frame_support::pallet]
+#[frame::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
 
 	#[pallet::pallet]
 	pub struct Pallet<T>(_);
@@ -167,19 +162,14 @@ impl<T: Config> Randomness<T::Hash, BlockNumberFor<T>> for Pallet<T> {
 mod tests {
 	use super::*;
 	use crate as pallet_insecure_randomness_collective_flip;
-
-	use sp_core::H256;
-	use sp_runtime::{traits::Header as _, BuildStorage};
-
-	use frame_support::{
-		derive_impl, parameter_types,
-		traits::{OnInitialize, Randomness},
+	use frame::{
+		testing_prelude::{frame_system::limits, *},
+		traits::Header as _,
 	};
-	use frame_system::limits;
 
 	type Block = frame_system::mocking::MockBlock<Test>;
 
-	frame_support::construct_runtime!(
+	construct_runtime!(
 		pub enum Test
 		{
 			System: frame_system,
@@ -199,7 +189,7 @@ mod tests {
 
 	impl pallet_insecure_randomness_collective_flip::Config for Test {}
 
-	fn new_test_ext() -> sp_io::TestExternalities {
+	fn new_test_ext() -> TestExternalities {
 		let t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 		t.into()
 	}
-- 
GitLab


From 5772b9dbde8f88718ec5c6409f444d6e5b4e4e03 Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Thu, 23 Jan 2025 10:57:06 +0100
Subject: [PATCH 099/116] [pallet-revive] fee estimation fixes (#7281)

- Fix the EVM fee cost estimation.
The estimation shown in EVM wallets was using Native instead of EVM
decimals
- Remove the precise code length estimation in dry run call.
Over-estimating is fine, since extra gas is refunded anyway.
- Ensure that the estimated fee calculated from gas_price x gas use the
encoded weight & deposit limit instead of the exact one calculated by
the dry-run. Else we can end up with a fee that is lower than the actual
fee paid by the user

---------

Co-authored-by: command-bot <>
---
 .../assets/asset-hub-westend/src/lib.rs       |   6 +-
 prdoc/pr_7281.prdoc                           |  13 ++
 substrate/bin/node/runtime/src/lib.rs         |   4 +
 .../rpc/examples/js/src/build-contracts.ts    |   2 +-
 .../rpc/examples/js/src/geth-diff.test.ts     | 162 +++++++++---------
 .../frame/revive/rpc/examples/js/src/util.ts  |  11 +-
 .../frame/revive/rpc/revive_chain.metadata    | Bin 661585 -> 671115 bytes
 substrate/frame/revive/rpc/src/client.rs      |  17 +-
 .../frame/revive/src/benchmarking/mod.rs      |   6 +-
 substrate/frame/revive/src/evm/gas_encoder.rs |  11 ++
 substrate/frame/revive/src/evm/runtime.rs     |  64 +++----
 substrate/frame/revive/src/exec.rs            |   6 +-
 substrate/frame/revive/src/lib.rs             | 129 ++++++++------
 substrate/frame/revive/src/primitives.rs      |   8 +
 14 files changed, 249 insertions(+), 190 deletions(-)
 create mode 100644 prdoc/pr_7281.prdoc

diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index f56c4568f2d..ecbe1fb0e62 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -129,7 +129,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	spec_name: alloc::borrow::Cow::Borrowed("westmint"),
 	impl_name: alloc::borrow::Cow::Borrowed("westmint"),
 	authoring_version: 1,
-	spec_version: 1_017_005,
+	spec_version: 1_017_006,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 16,
@@ -2189,6 +2189,10 @@ impl_runtime_apis! {
 			Revive::evm_balance(&address)
 		}
 
+		fn block_gas_limit() -> U256 {
+			Revive::evm_block_gas_limit()
+		}
+
 		fn nonce(address: H160) -> Nonce {
 			let account = <Runtime as pallet_revive::Config>::AddressMapper::to_account_id(&address);
 			System::account_nonce(account)
diff --git a/prdoc/pr_7281.prdoc b/prdoc/pr_7281.prdoc
new file mode 100644
index 00000000000..33e04c419ba
--- /dev/null
+++ b/prdoc/pr_7281.prdoc
@@ -0,0 +1,13 @@
+title: '[pallet-revive] fix eth fee estimation'
+doc:
+- audience: Runtime Dev
+  description: |-
+    Fix EVM fee cost estimation.
+    The current estimation was shown in Native and not EVM decimal currency.
+crates:
+- name: asset-hub-westend-runtime
+  bump: minor
+- name: pallet-revive-eth-rpc
+  bump: minor
+- name: pallet-revive
+  bump: minor
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 26f4dacf9a1..220929fdfd8 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -3301,6 +3301,10 @@ impl_runtime_apis! {
 			Revive::evm_balance(&address)
 		}
 
+		fn block_gas_limit() -> U256 {
+			Revive::evm_block_gas_limit()
+		}
+
 		fn nonce(address: H160) -> Nonce {
 			let account = <Runtime as pallet_revive::Config>::AddressMapper::to_account_id(&address);
 			System::account_nonce(account)
diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts
index f26f275ec3d..b162b8be0ad 100644
--- a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts
@@ -55,7 +55,7 @@ for (const file of input) {
 	}
 
 	console.log('Compiling with revive...')
-	const reviveOut = await compile(input, { bin: 'resolc' })
+	const reviveOut = await compile(input)
 
 	for (const contracts of Object.values(reviveOut.contracts)) {
 		for (const [name, contract] of Object.entries(contracts)) {
diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
index 86b8ec50bd6..2a4ff2edcdf 100644
--- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts
@@ -12,62 +12,64 @@ import { ErrorsAbi } from '../abi/Errors'
 import { FlipperCallerAbi } from '../abi/FlipperCaller'
 import { FlipperAbi } from '../abi/Flipper'
 import { Subprocess, spawn } from 'bun'
+import { fail } from 'node:assert'
 
 const procs: Subprocess[] = []
-beforeAll(async () => {
-	if (!process.env.USE_LIVE_SERVERS) {
-		procs.push(
-			// Run geth on port 8546
-			await (async () => {
-				killProcessOnPort(8546)
-				const proc = spawn(
-					'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split(
-						' '
-					),
-					{ stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') }
-				)
+if (!process.env.USE_LIVE_SERVERS) {
+	procs.push(
+		// Run geth on port 8546
+		await (async () => {
+			killProcessOnPort(8546)
+			console.log('Starting geth')
+			const proc = spawn(
+				'geth --http --http.api web3,eth,debug,personal,net --http.port 8546 --dev --verbosity 0'.split(
+					' '
+				),
+				{ stdout: Bun.file('/tmp/geth.out.log'), stderr: Bun.file('/tmp/geth.err.log') }
+			)
 
-				await waitForHealth('http://localhost:8546').catch()
-				return proc
-			})(),
-			//Run the substate node
-			(() => {
-				killProcessOnPort(9944)
-				return spawn(
-					[
-						'./target/debug/substrate-node',
-						'--dev',
-						'-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug',
-					],
-					{
-						stdout: Bun.file('/tmp/kitchensink.out.log'),
-						stderr: Bun.file('/tmp/kitchensink.err.log'),
-						cwd: polkadotSdkPath,
-					}
-				)
-			})(),
-			// Run eth-rpc on 8545
-			await (async () => {
-				killProcessOnPort(8545)
-				const proc = spawn(
-					[
-						'./target/debug/eth-rpc',
-						'--dev',
-						'--node-rpc-url=ws://localhost:9944',
-						'-l=rpc-metrics=debug,eth-rpc=debug',
-					],
-					{
-						stdout: Bun.file('/tmp/eth-rpc.out.log'),
-						stderr: Bun.file('/tmp/eth-rpc.err.log'),
-						cwd: polkadotSdkPath,
-					}
-				)
-				await waitForHealth('http://localhost:8545').catch()
-				return proc
-			})()
-		)
-	}
-})
+			await waitForHealth('http://localhost:8546').catch()
+			return proc
+		})(),
+		// Run the substrate node
+		(() => {
+			killProcessOnPort(9944)
+			console.log('Starting substrate node')
+			return spawn(
+				[
+					'./target/debug/substrate-node',
+					'--dev',
+					'-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug',
+				],
+				{
+					stdout: Bun.file('/tmp/kitchensink.out.log'),
+					stderr: Bun.file('/tmp/kitchensink.err.log'),
+					cwd: polkadotSdkPath,
+				}
+			)
+		})(),
+		// Run eth-rpc on 8545
+		await (async () => {
+			killProcessOnPort(8545)
+			console.log('Starting eth-rpc')
+			const proc = spawn(
+				[
+					'./target/debug/eth-rpc',
+					'--dev',
+					'--node-rpc-url=ws://localhost:9944',
+					'-l=rpc-metrics=debug,eth-rpc=debug',
+				],
+				{
+					stdout: Bun.file('/tmp/eth-rpc.out.log'),
+					stderr: Bun.file('/tmp/eth-rpc.err.log'),
+					cwd: polkadotSdkPath,
+				}
+			)
+			await waitForHealth('http://localhost:8545').catch()
+			return proc
+		})()
+	)
+}
 
 afterEach(() => {
 	jsonRpcErrors.length = 0
@@ -88,7 +90,7 @@ for (const env of envs) {
 			{
 				const hash = await env.serverWallet.deployContract({
 					abi: ErrorsAbi,
-					bytecode: getByteCode('errors', env.evm),
+					bytecode: getByteCode('Errors', env.evm),
 				})
 				const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash })
 				if (!deployReceipt.contractAddress)
@@ -99,7 +101,7 @@ for (const env of envs) {
 			{
 				const hash = await env.serverWallet.deployContract({
 					abi: FlipperAbi,
-					bytecode: getByteCode('flipper', env.evm),
+					bytecode: getByteCode('Flipper', env.evm),
 				})
 				const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash })
 				if (!deployReceipt.contractAddress)
@@ -111,7 +113,7 @@ for (const env of envs) {
 				const hash = await env.serverWallet.deployContract({
 					abi: FlipperCallerAbi,
 					args: [flipperAddr],
-					bytecode: getByteCode('flipperCaller', env.evm),
+					bytecode: getByteCode('FlipperCaller', env.evm),
 				})
 				const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash })
 				if (!deployReceipt.contractAddress)
@@ -121,13 +123,13 @@ for (const env of envs) {
 		})
 
 		test('triggerAssertError', async () => {
-			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'triggerAssertError',
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(3)
@@ -139,13 +141,13 @@ for (const env of envs) {
 		})
 
 		test('triggerRevertError', async () => {
-			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'triggerRevertError',
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(3)
@@ -157,13 +159,13 @@ for (const env of envs) {
 		})
 
 		test('triggerDivisionByZero', async () => {
-			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'triggerDivisionByZero',
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(3)
@@ -177,13 +179,13 @@ for (const env of envs) {
 		})
 
 		test('triggerOutOfBoundsError', async () => {
-			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'triggerOutOfBoundsError',
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(3)
@@ -197,13 +199,13 @@ for (const env of envs) {
 		})
 
 		test('triggerCustomError', async () => {
-			expect.assertions(3)
 			try {
 				await env.accountWallet.readContract({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'triggerCustomError',
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(3)
@@ -215,15 +217,15 @@ for (const env of envs) {
 		})
 
 		test('eth_call (not enough funds)', async () => {
-			expect.assertions(3)
 			try {
-				await env.accountWallet.simulateContract({
+				await env.emptyWallet.simulateContract({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(-32000)
@@ -233,12 +235,15 @@ for (const env of envs) {
 		})
 
 		test('eth_call transfer (not enough funds)', async () => {
-			expect.assertions(3)
+			const value = parseEther('10')
+			const balance = await env.emptyWallet.getBalance(env.emptyWallet.account)
+			expect(balance, 'Balance should be less than 10').toBeLessThan(value)
 			try {
-				await env.accountWallet.sendTransaction({
+				await env.emptyWallet.sendTransaction({
 					to: '0x75E480dB528101a381Ce68544611C169Ad7EB342',
-					value: parseEther('10'),
+					value,
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(-32000)
@@ -248,15 +253,15 @@ for (const env of envs) {
 		})
 
 		test('eth_estimate (not enough funds)', async () => {
-			expect.assertions(3)
 			try {
-				await env.accountWallet.estimateContractGas({
+				await env.emptyWallet.estimateContractGas({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(-32000)
@@ -266,15 +271,15 @@ for (const env of envs) {
 		})
 
 		test('eth_estimate call caller (not enough funds)', async () => {
-			expect.assertions(3)
 			try {
-				await env.accountWallet.estimateContractGas({
+				await env.emptyWallet.estimateContractGas({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'valueMatch',
 					value: parseEther('10'),
 					args: [parseEther('10')],
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(-32000)
@@ -284,7 +289,6 @@ for (const env of envs) {
 		})
 
 		test('eth_estimate (revert)', async () => {
-			expect.assertions(3)
 			try {
 				await env.serverWallet.estimateContractGas({
 					address: errorsAddr,
@@ -293,6 +297,7 @@ for (const env of envs) {
 					value: parseEther('11'),
 					args: [parseEther('10')],
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(3)
@@ -313,17 +318,16 @@ for (const env of envs) {
 		})
 
 		test('eth_estimate (not enough funds to cover gas specified)', async () => {
-			expect.assertions(4)
+			let balance = await env.serverWallet.getBalance(env.emptyWallet.account)
+			expect(balance).toBe(0n)
 			try {
-				let balance = await env.serverWallet.getBalance(env.accountWallet.account)
-				expect(balance).toBe(0n)
-
-				await env.accountWallet.estimateContractGas({
+				await env.emptyWallet.estimateContractGas({
 					address: errorsAddr,
 					abi: ErrorsAbi,
 					functionName: 'setState',
 					args: [true],
 				})
+				fail('Expect call to fail')
 			} catch (err) {
 				const lastJsonRpcError = jsonRpcErrors.pop()
 				expect(lastJsonRpcError?.code).toBe(-32000)
@@ -333,7 +337,7 @@ for (const env of envs) {
 		})
 
 		test('eth_estimate (no gas specified)', async () => {
-			let balance = await env.serverWallet.getBalance(env.accountWallet.account)
+			let balance = await env.serverWallet.getBalance(env.emptyWallet.account)
 			expect(balance).toBe(0n)
 
 			const data = encodeFunctionData({
@@ -342,12 +346,12 @@ for (const env of envs) {
 				args: [true],
 			})
 
-			await env.accountWallet.request({
+			await env.emptyWallet.request({
 				method: 'eth_estimateGas',
 				params: [
 					{
 						data,
-						from: env.accountWallet.account.address,
+						from: env.emptyWallet.account.address,
 						to: errorsAddr,
 					},
 				],
diff --git a/substrate/frame/revive/rpc/examples/js/src/util.ts b/substrate/frame/revive/rpc/examples/js/src/util.ts
index bdc64eea1ef..2991bdfe636 100644
--- a/substrate/frame/revive/rpc/examples/js/src/util.ts
+++ b/substrate/frame/revive/rpc/examples/js/src/util.ts
@@ -85,7 +85,16 @@ export async function createEnv(name: 'geth' | 'kitchensink') {
 		chain,
 	}).extend(publicActions)
 
-	return { serverWallet, accountWallet, evm: name == 'geth' }
+	const emptyWallet = createWalletClient({
+		account: privateKeyToAccount(
+			'0x4450c571bae82da0528ecf76fcf7079e12ecc46dc873c9cacb6db8b75ed22f41',
+			{ nonceManager }
+		),
+		transport,
+		chain,
+	}).extend(publicActions)
+
+	return { serverWallet, emptyWallet, accountWallet, evm: name == 'geth' }
 }
 
 export function wait(ms: number) {
diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata
index a03c95b4944f663225642b1678ef66aaccec3fb5..ff365892a265e1fd9b59f6811ea1c59642b65d91 100644
GIT binary patch
delta 25763
zcmbWg4SZC^)jxjc?%g~0?n`z92_%rf28j@bB~ieD0SOQ!%1Z(PMTyCh-H?@JH`(0?
zQ8Ca;3o2^d!ia)`6_r*Lo|axwQBb4OM_OY=#frdFQBhITih>sXf9Kx2$%f$b^#Aj-
z=Dy9GnK^HB=A1J_$ByB5{xF<21Y$myGj%8Z+(L5V|FX;?*?*WUeW{WK@q&~BQW>vF
zDI&i3=9KYbfU+xyk{<6&$q`#<yivY_{fQ`UvOMm0T_&!i>qR9izS}j3tct(qnorio
zN4Y1Fb@4{`<>EHV+!iGx-tHbuw#SdV^QB#s{E_`sW6h3_P0c2|;}xm-;vUMLwxEFr
zQ?p2W{F&4V<WT(E)O>O{KCDkZ>5O07r--QWb$w<?PK7+l?s?og@V>rTBsHGh_kt0V
zrUvQ*i+!<xe{RgTBp6yO3F!*i+~}*X55%hciH}uLe2w3$rq#|7@{6O<Kx|H6sW0M>
zN<yYW?lp`1_=l>9@n!t@lA8jdSkav3P%PLGm?+E@M(Z`B(;|Vu&4Gv{WGf__B-f2r
z)f(U8#m<Z@9bDI|@#9NF=tZa|P%u%b_`?-_|7)<VBC90gEAs{Yj`-nm8Gq<|=_9IZ
z(v;>%BoL};nHgLh@x@rd57vRLg|*>GO`vLNFjnV}_?Bjn{K>w04r_L8QN=_dO_@i`
z@p~2-A3P0u^?=_~8wodf>ccflqMlgTQx~rHM@M)#B#$rD(p5ZG=Zkr2!p-%5PbeJo
zEDCt)0`>kG!KepK+iL=P4c<08w%2;WAKoeY;WaW(8~Ks7h>&?~pF+lI(`AyDcwp-(
ziI^YQrcbQ58TSt*%5G4}3Xznv0S<B(N!1>Akid}Cb0{;Dvd%nm11n1>avy^zBRt*Y
z(Cnkg$WnT%dr>&#S6HfD%+Rh`MxHI|Gszr#RgH;CDef-p-#RGkJp-qKsHZUyX$VH6
z!EmTP5RKNiczhlZCt7ztY<Y!JQc(v?L;?-rn@~Ac7w`a$I#NC1kp9dWEC7u9JvEU4
z_%RBI9M}ANFy`^a!mSO#8n9)H*Hc>S!Htg?b!vQ}B#e*Hp)CqTW1c`QP#^Q4;Rc^S
zkRYDNhv()RP!FRBV+f1t171%Bx`gief+5Jj?FQhk9veKs_Kp>;S@i+`;y}bJyuxeL
z1_B)XQeW^UFiMcsKVi*gmt11K^4HhwBKTj#{}%i&Y1izs{?gW3mdxx4LCG*)x~E`F
zl2jp(p(6|wR5gaf^=YI??~T98kglMAGbx%JURG2we!MPDrT&QmDKdoae=SoHxkzV%
zDo9133IwcL#JM_y3<oGv90ZaPSk@Sfw6Lb<rJ-@(!NC^^`Ti+7ML>{HU@5S~1s|vj
z*L#H#Wxj|H$}kX#j(?t&zaXWzP6>x<gNw(HH!78(*z8DfaWGVU`O%S{>T||YJzgi5
zJ~U1sh>cOg6ExU@H*@*dsCdo*&r{JKjK(6t)<w-wW}LW2jp*fLNO$#~Lr(x{2t!AC
z!nKL9#llMhp{U0IXut!yJUY5AFX!M4B$@T~_nuI5!y*)7O9so%R^Dw$e0{yACg6+e
zyyvT_X>Mq)2ksL+)45J19?$jFQ-je)U#zCC0-6$Ztp!4>D?K;)B0(Q|7P!e*-^_J1
z#6ZVaU|FE1IR@eLaMkLq<X!Ul8-Qd3pRH(29}`$AycG3-+Q70{0Mf-}Pw$;R1Rn=7
z0?6hZ$NQ;^fv(v-99Q$uJ1&!vKurKVu77V1q3QbI%>Zlif{Cw1zM3T-AMg9L?q+Jj
z4UL#e(2Op()w9B(K=p_f9=!yYXW$GXgo%UG%op<L<M5_&HekiWto_)<N33f6LyzMc
z;#fV`&=P)skVEv<V_-U|z%4$LB{vsC2k{ApgKez$)u5lrI))>I*YRdG^Z8^+fUS5B
zt&BKgRd_23$<W_M;sv4pt%5kTex6Y*(Ud+NbTu|7Nu`<1p0}|ck}?zY3hP2K9;yhM
z3T)3YZ%9X^+7qlb5ZKDuh+_u`ulM+BVxWQ7GaF?iOM}tC2z>@JY3;ptq>h2Apx<Bu
zoys7*IZRzMd!HET@#*Yt3SJlMn~3Z#?@cEoD-3Cjn1xNI>Mmoz<oej8>p4_yjt2bR
zqB>s$TEELMU|a0gblL;UmW*4c8->u@ntC6Y+R!jvqVoTSzyAz<t-7rIEdI>y8)Gd6
z=?wnpi4)8;o~3m`Oe?@nqK2t~k{SPe2pYVMn@yZt=9F-bhSm|*>WA_@|M#96TuQ5(
z8TJQj`IM})fGMP0oV~(c!#av##_pDNUDQ38M<FQq0B%eu1ix2!el~v>D@^DOT|P0R
zws0E&!)^c|uxJ=hx;F6^c8x|as`uVHl=nWK^J@AHR!3RS{uu(FXy-p}rlPAELm3;h
znckXxIBzX!B5Re?R?LNlSy~t9o>n~3I^A5;%k@T$QQc7bJ#b?vQZV*^-sNl)H5L8<
zOpahe6sioN)BD_|=2~gmQOu#RorADq10Kw-bzxnDo0ZSey1@`QE}@T1L(8lKvT)0Y
zPm492;vs19Xw(;J@eDV`-7r|W`ib~L(b@nk=T<)^KInU$MZ%m*#u1&58Uf1Z_eTIC
zZ-fWaqOM>g;hTbfNGlhBnxt+=!{CunGgP!c5DDI-vj(4H0S`Lan4N;KnT#$*0!__m
z)bGu&anuJ;uThr=iLHFk+P787D4Jrt?HcGZ!>H(?KJ{Lj+*D2yHKMU_#J3o_GJ?en
zpX0dI4ud0H6V#_v^E)~ZMQqG<vF3=uCWb{g2qvM4^s;U>$;HQD@YkNO4u&cI-iy|u
z;ylWZd>{>A8GEg>#RYWz>ry|~y4SjjR5I&6>lV>xeAiyuXT4tJYs1a+=|DFA73)S4
zWbeLWEzS>8DPv9`+FTz~qzckNr4rIiiK5J2A_)ytz<-XC2o_JxxF>?pz^1oj4bj9_
zwOey>`8~fhvyOJF*Nx%|848YzRRu$};Rfb<)tW<Ew1QWyFI&lS?bLr;D`m?{>R?;O
zSTnSSx2%Jm;ws9HJtt-Enn6}cWR1B(S(Cg%F&>N+ig~ZEQ1ttUh*=gZl$oMs4OQ5_
zVrwR={M>rAxR$cL#nv1)<5g>lcIb0!nDlR>Wcd7WEHEcf6OQ-=k`{wa(6UCWI%(}g
zENzMEYrnJ>kag_EFRiU)gLcJN)~O<{w|83C^XtHGtZjsB(zc(nK269Lt@1l-JFX1Z
zyV=e&)~RFz`}T}=uWbX6CMlaY3!B*9@2#WyY{9d*l?wRr_Zh-Ajr?HUD~j7Gn^X;B
z{^(ijJ!GfW_@niZB<`fF?KP=CTV#=MBfHo!i|iA3V|l-h4q!J(awp4G<$>%*tNa1k
zr(G$_ABtojE3?W2*d$fHfV4BeDql<XvpuRj6BoNp&Laod7@K@2Imq_e<V*4nfcUc;
zxov}$0O<hT-P~XT(cN>y4FO5$FsR<4W!dF^#CZ^p773pv;Sgm_G3x+rfkR#_lf&A+
zKJqg%ImTuUkSDTdGUavTn1=fzCpoU&IZXaUAt$tGzD%r+WBTA#s#4)NTQNo+mUaSf
z#FJFO4`?BD0}(pefk&+a8KB$UQr5giKNlVcaV!b6oMN40<m<>;He;-Oqj9-{eLGfO
zO-`}a%j81&3?+w15i5SodI39nnLMAIVeZGRBbmQYo^#RJ9z!NduOSmfXLO>lqlNM~
z*{YCZNx){aug1z&nahP0i`d7lBiQTX<g19v`i_^MCr<X}c)1MZ&$(Rwm{pd@sVt*N
z?n5N)l8N#JLIl=$g`5|kGdw3VU2#;K>$7wG9^{VnzV4x2o)QO1gF*?#;=c8n?7p|G
zeebL9f2kndI;}b6$EqRWGx03qQ?U6Mj(9zD3<U(N^{!t)q-92ZQ%&g_Ke>9=+<L69
zInI06o**V#j~az^W2Hyy)6J#3-y8;5?0k?ewMehb>oUR2&ZDfSRv%oKQv`EP*T~}K
z!-w?GR2=79-Li6roK~5k+wJJOY4r&tpD$mHX`s|E-CDtwX~MkZo)q22;>$S9@=-6z
z(ygPQ-%T>gU?-u<P+o<<^~PV^l0H|e=FiEsOgq;rNSlkXIxvBoC87|fghAiEcJP>{
zK5wSR$H>#9uUERaci)1GLzwaODU;jeSlT28#<IS*W?!QFf=sjfKOcins^d-dtd|%C
zk#;1FqN_~uIFc78lji8F^r&f72a-nA`u}f4=aOn;7oNGn8}3?WdzJ<w@Oc?lJSOs=
zC7IMsGlT41OOUS0i4~eCtxg(r_h)_73FCSMX9&aD;Ll=bzO<IKc5{i|LUSnC)Ew}X
zPSuT+7;wz}7&`Bq$GO72A*N$Rnw}Uew*Yir^q$Anl3ZgdYE&gGG_yv9(baRT`W{xh
zw3@dB=Tj^Qiz&%s=SH?+gL+B6Y3MTVJyPaa=}lh6@IAzE$H()3yBy+!U+q^V=|oJ3
zM3Rr7!B{vXz<Djwvf^`{KJa!$O&^`#do4%4!E19cA?SvEkmbB*O>Z@jr$Y>yt2r<E
z4;L}gGCnp=rSqdglKxoSYjLA@tgF3jXkR`K!ycT<`&rCABuQNyaNaH^Rw`*zq{2im
z3?Npi&TLhG?$MriA#b5X_nl4VZndO#a?ab%r0z9Jl}^!CsUp!{^4h~IOV60iCco~{
zI-hE~2RoW~MK^x~{&|hkc_fi@siy|!FidVk5UBL_rDiMKRE#tj2D?scvz+ePfiHF#
z$e(cP=+5lSY{gNL?Ah${*~iPLrz=Xvv`F~oKq!9f=8X8ukM@h_UeFJAMMg!PZ)yD4
zcbV~*9_`QHiA0DOKafLm*yAPgJT({7zPOwSdCWQu*2PFxJx%VHJyMYzQ^HHRbyT3h
z>cJmD|6>(}F)Th!UP#8VulZ#Hn|Y;tA=@`y9#vSZ2$*|9d|36x3gJ%H5nT(`u2(~`
zV1Zc^j<gtMgklb<AN%@BnH$(6r_0&6Ts>XRN^hd1(6G~rt_cJe*Tp6ZLWxoOzUgvW
zcBw)t$^wyuTW<<x<!Dj!rI(;ssS@9Y(R@8!zLd;lLrVd>ocT)Si;N+rJIK%=TxL}v
z_*L~mr?ZzzVbIKD&a32!dGopvuz(`~e+2!HSg8p3!CqS_xdorXT%$<3w&p7NHx?3L
zgJ#JgF{m(~ou)JQI(g{u21T07?Zzf}yygdLco;-z(CaT3^YIqg$78czC7%tREz_cw
zq@pdnfFvwek`30Cy{)&ED2HXrN>*7eXR+I6%l*kJwtluejI3oZ%$7%xHum{!d|6m7
zXS1PY^1{Ar6p}wbQacy3tPd+O7{6;+yiC3Z6?7Q1T%LzoxL<yrvg_E%a+u{CjDBCZ
z$sFaDq^@t$N15*yw(v3FX{+|w9QlvLv5kLl2pJw>yAq$3ovF1}$X5_?XA;`dcbNe9
zBmvyTR?L?(*oE`u87aFJk~IhG%y1|w2)p@*@wPp@on4Bs&*)Toy9r}|5{z~OM$R?z
zLfd{`zFiRx80GtQnB`N|Zc=q9Z1xRue&2&img&1E8CdNpLH|MKzE&O}9#q(h%j9%6
z=rP#ErPs=i%L=o&8K%eh4k@J0lvr*N;obQ0(-CbkdM?<7tSRsx*97Y&;V|=E3zPjA
zzZ9{%&rq3tf30jM$C<o99!XBH(F;J#lWgGvFmxxocY!=bI;D{NS>re?i#iv`L!~nc
z*|?rb1KH^7WDgby98#Z4CkjP^XRfc7&l*_Sz+(UHb@D}`Rb_v@P9D^Hp2(!Cc7+;E
z<1^sbBa`R$zgLjbiEvr6hDyX<dW;(4a7b|%8s%8VGSV+=coI^Gl)xWfY#dhNWYz#S
z>w0;(cr5AuxDxNE$z*NU%Ts}ycdnO512^`Ca)H#TI2z-{4zT({)H$WFonJu~Q|^-}
zdxXC^qp%;>OGDY$3*{l=*(8v_Ou1FgWZM?XDXg_p?vr9w9iz?RL;^RhzaC`%W2Jm`
ztE&FY5(Wq|!%RI#G}YvY?nMC~n^QfnEgwBEh<tsv;Sr*sI~?NGQ&q5Uha#k_T;1fS
zXR3y#$xCA2Og8HVxgafDJ(qj4RhD|KoGId}v+v6{$o={&MYF<~yY)GVH;1ruj!J&R
zRvuL|nOY^gip~jr=Bi}%`RQ#0pG`go#dB1{`@!J-pzQK0`GTpr|NHRsdw?L4xwzpY
z&*LM{RE3c$ySI{LXe+AZVvD6fRankfaPu5l1oj`ZzD8bvN$R>9xk?;opoo25Bd3uG
z>-}<>vsi@~XWRsrNF-XyD*f^${1w+r`6a4QVpe$bIk~T`lsAWuGmV$S%1uPiOG0$H
zUU!GvEzIMk@wk9Z3dpa+A%)9u_VP1wsy4Y+rV{b7t8SD#qyWdP?=JA>=p}L=F<`d%
z<6X2bd{lQWkw-`+D$)>U?jV`$;1ck80~=5eiEdKK2y;?w9y=P-NfWE9mve2+ybFq3
z5Ly_kmsi@C>s@jSZp;c7G|0DOLU^J<o&(3x4-IlIE;%8&|KPPKCGbCDn=0T35U8<^
zzu2G(8`w1=`5D_L{rxK$!WOn83_r`IVfj|FmESMyyNws!t_t|^=69;XPByqv9wY8j
znR^)-#CFf8c5=J6qEY@vX)T(fhmpY;jgX0GoiJGXM!)G9)CZq8!e)XA9EQOa@fb|T
zlLhKxSP?h!5F1v@!I-zGxE_W5W*S1kFL)%CaJ!2uT*SY*D-_;O(+X#&-<BQhOo7$m
zu7)3AF^@`D8_5Q&>0SAP?A?4cdsG2Gj)?;nVV{22t`-XI><=yS#bm#Bq(!bF;sJwV
zmv)%!a4?A-I(Q|AdyFUu2RTGMAJX9*Rx^adNz4U>|Maa;I?}E3B<T8*Tjd}wc(p72
zgh~o4xR<WToOwX%6GrL#mcvpx$qp}<t1uT#xJ{k~bw8?A9xM_YD{Ga9u)p4hPd(n{
zik(Kg7oIXZd?u;Gr}W;rKN8OH?&9gJ(bC7Q@+1!Q!{u^+cEt*LazCq$V`hwaR8aLh
z_hD6V+StP{kW6jI3c1EgQd!6Sasj*Z4!N%*T`wbK+VJBuX?FaDi&eJ#s62*bv%SBP
zcaR*d>DTh7BKzBY@+Ep5Tb@y8wJ_57<S4)kag0qEV^jR`Y?{e-t(7aKafzo<cgxje
zf_CrSm=f8W_X7+UTT;R@<MLCal)WC8*TV*^`i<;|4fx7$&~`cRVn1%76(T*Pbe()w
zBn#M;_seS_u=q3;Qrd5W{N<p4jVvtlwKTvF1vZTZip&R|4j=^BkO$-nNbAxEU{^M1
zk31mHv4~B!_#KO~*ndAGt8Cyy@>%XC=J=R&i?xr~Vq<U475lU1hvi?uHGKMEIhBx=
z%(ext%9Tc^LR;j5DXVOxa57f3ftkL>qN_)E=A*e)Hp!ECr$|?m(b9YvBa*NN=+Xa(
zYi$C4AkXLwp^g2|7C8WCZdc=Q&)xM1tOqz{ACU*cZ+r9+c>td9GK=;9t^6cj>Gwx}
zD-UCl$K*6N@KJdX*~Z2{3JY>OTl^?Yke%$FN9Bp|%l_q2plLVjyH)m*J#6w;eA~vp
z#Kp>W^mgMm1GhVk+tIk)rQgcsSeU?S(+lV0c8_rj-6iZ}(Z}SA#daI!_kr4TkICLt
zalehZE5tPRlLo);0Rv0ixXvC&j47|fmSnMUW*J9>gY4;N<c;J|ckw|T7Nhv#q~eEp
z@eYe{%*H&881XZ^<r~tD>quNc3WXB}I^Dua1CN}kA#&ko<x03+4?HV}ap4#)IAb$_
zpG^XOMz8HYAqi)BGk7F+PM!>_U9d9p8n{AdRaL*A=(HOk((T>D!Mh|^b*jCW-!>Hx
zVfm-pS>v;i(Hozam#FD@CpzsyrqPDZdD&)x@q6UCVvhOn*dF;xlFPo_Bli<?&3D-^
z$g>@Jyq^xiooj~_E_*>Pp`?Jl&@PvgG0gp{oR>GIN597P?$<b@U*p)KSLN|oCT@CF
z9y6-gj#<~;h*Yu+k}ydGtekx<;pSMh*iME+zW6PcN&*&!97PwFm}n_YLQ4tnpW@CC
zO8HRnG*d5|UT&6MkW{vuJ@lG94J*43Uza`1`a1m5^H|2~a)Y{nS1z{;mF(`<<=IAw
zu@L?A{qhi83ir!1YyrI`aAuI*y&ofK=*B6Qsm?#kVMjAB;t&L%U1-r(|5+YS$#V8`
zhdc!$P2Z9y*jL&~*0cx$js7TWdPGbUF(r3Xe--=PTYA|q-;xVmNo66`_V|)t4bW!)
zO)inJFynQytyY6e+LDIY#s^}x3hUTE-i0dKpryPgj}Rf-yvBqrCQ`N~)xf(XzS`QG
zep>+&e6^KrdtW}IZsQGXu|uqX|97a-otpatc^MIR+2ecf&1AnlEFXp{Soxv67OKE?
zL@vQ)-VtboeQfy=dCY)zz#lO$#N&@hQ3&{9dc}e@O9HX|?2RMPCI^`PsC<>!Ztpoq
zT)LlQRuS6m!hs(B-QT;vyeRtHkwl(@THZ1Fe$jqNhjQ4CsaOA+e#}J3@g#&CV|_l7
z+xi~o13hLJPUv+`qRvU9PN!MtR8pNz_V176+tgFMPN!Wsqy6%@Tq@en>UE)+iG#iS
ziTpS@%vwLi(!=VAU&jV$FMo=ON`yhIoRqgqP6xSn{T1{=(dp2Roy5G3`LpwkJVi`*
zXiZ<qr$jN+!Rr1ge@wEq+5eK$M0jq_d@E;ZD^9`8?UU;uBXmD1pPrhr7|vz?^)0wP
zj~8l!vt!A3awcSV)puB9k7VuNLEsCR^R&FvJ_alcBL%EIj_p4!*F#9ho&o8K*{x^f
zp}1^0gQ~^sxigqCi;Yf|IM^tUrJpwAd-)1kEKPpG6pMZrWFIoMY&m>E36DoPni3CL
zgn3#4r62LRwx5kYgt>N?jc&I4ct;d>z=0W62eI4K>%k-tK@MU!5gN3o?DQ1^+2cji
z&!-%kjZZBOLr|6{L1|$>Iq5a(at^G;0Wqvfp|8O*#wDE<x#%?p|K_k=F52I|M(@}@
z;uh9w@4Bd6f<wfaPWx-$_oWK4uj8MgB;Bx+@p>!djeli-PN!=ym+JRR`q5tw-l`XL
zzwRT#HXURKChqO}UB;8bPR-e$&XO>t7G%>j);N&vf-vd#<FlwUbB_b8q>#cXVWe^C
zfsi@jdiZPh7-eEvv;cC3@&nnvEIKf?-7&Pdu@M;qzWONH?+_ep=P5aT#86-2<$gH7
zx=KL(ni&u64w5z1$eduF!5BwLHmyVe6?pE)V3l0Rj$~7N`T<98)8qjADVz51-+>_s
z{Ev9hA>hYl_jTcr(a+*Rv<{M>kL!g&w7+=F!Kwz)44A-Q52BagqJO$<Fm1>@&g;6}
zuOB8IL^z=XIz&!7Ad7mLj=}Us%)*I|xtPyG2lY7xQPAxp<e=z&wo|Ol1NdinrF8b9
zhrS7uSO=ivl{kUdd@{O>r^m&nVQH<q!4S-BtC!N*ksNx3Xmx6yAvE7EsZP_a&^VIb
zFFDO8ZMc^XCSt0Sm5!nvV!D%M<<n|0)5&hjrvv%*!F;+Al3Q3nb1jw}r@}f%1G7s<
z(|Sj)lOxC}riwzI_SI<GYJq@uUQY}97dQ>h9pmg4p#rB8-}&u8_EsUi&o%~C#R8`=
z&e`qWh`;fzs#~4f#&PsX5z<^*M7Kb2KQE$-loDPeUnphsCeVC{>aQlyDqKFEKz){S
zbZhV=+K*LFqztp*>50^9nU{EZ#TE20S)gT4qEiVZ_Ske7;I|ahSt$7DVmboz<afn%
z1+3s>lj(3=woIm1^mrP^o^GL;=e&Aw3Mi9YXBd;F;+4;6VC+;{W;Ebok58rdNC79Q
zXPviz$LE#kD`Ye?oVjkLDrV#dOK2WSx0lc%u(m%fp%=mzt9NwxG<xN%pfh={1Eh1q
z{Q)2cb4Npua!tL<HSu!n!|61O9hye3yR^ASg_hnGS`5Ja0tE**`*NMY!_TL+R-%x&
z$|>N7!W+mMCp@zHAl|zY11exYT}fYr5%A)4+Rw4pNz#yGb`#XVT3+byliHlbGcC9*
z;4d9JS|FZfRpHuNM0L-3-EzLySf#c(<C|&*vi?_5XX-j9aRdQGScjfi9Z+s)pfB5u
z1X1m)Tj?Jx;wGn_ag#NRK4{;fcUM5}!dBK<2Ku~tShBMJnFR*f#tLTB66mI-vmxKx
zl0N@=Hl2h8g}sbU7k4{Zeyco4+~d@jt|Dv<E(F7NnnJKENeFi8jk(9j!Y<s2c-+mQ
z`k=MD+m4pg$!N6u#XIHnm%W>5H=5aRHgh1Unf-b*8=#~Q@MiGXp@Y-<%%yi($U*Jl
z`E(t@+JbjfU$lycop4lUX?Oox{%;YgjDN^3mklvKo&@?h2Usp+CBO4}$jM1waw^wl
zyh6~da4M<7DPAEc!??J8A^lv22HWR{$M=pJ+A5M1X7|$~e$^umH~4A6rPh?*`q`S&
zbLr?dmyV^dR0K7JHPt|<|L&(lL}v>7$xo+=nJMhb0L|ym_&9*Y19Y;On!?@<(7|GQ
z3fp<un#LTpw2i-fxRws(FWYNj$DpPTPF_s!6tm6h+ZWR>#2oX>AM0p;tUBJPqXVsZ
zDTEN_<U^8!bhwynR=6sN_VT*Nb<ynH6a$+h&ojP}=NVsa3e<Z+3I@pkh+|R&{J0q6
z!?=`gcciLKxRKf@JP6a9=nGcMgrskj4PQz%=+L7}>0-FdpzuZl*{6$XYY*l1|E%yb
z0d3zjn!$91r#J8)E4(#H3U8Rf0{QHPTd9(gn-uYwnW86}X|u1QyDXOS6f;rmg%#A3
zK2JvoL|0f~-np?BDr=)tL|=-LLw47%<rHo4N_r2{=<w=GYbHy*osJiS`oj<{d^-(O
zsNf@a(($m#2HZubKn34$7tIr!bn4}^-Ee7&&E|vd-)!N<G2L8irP`>ql!}(+=vQ&I
zHJ#16hX%21dgdNldhyDXWWBvArI+4bg<hlcs}hxq@1-+ArF-tBaVW|Q+Auq<HHOoc
zVkDaFe@3=zd)nv<3p^te*U`U;8&bI2f1u`FProPPCa@pUyV&l>WUNK*r$KRx(eXj2
zOxfI;(mf|{Mf>6@S=eTj>9gHr(Vbmo*iRcFSv4DIA>GLfZ%<+OZUVWV-9Vp*k$2=l
z+D}{i04*obV0fs2-s11rRU7H#{_QDb(v(CDIFvB(+l>;tH`3{>=3y8U8JlR>en2On
z8?@};-}|#|o9NZ#pi$QT5a!=QEcYQgWayz3axD+~1^iRO5xAG4OC|?nJPZ{>Tn0d_
zcC3>0KCFC*ZWNChLrXty4(&wO&;+Eh>E&*In9fsA@X;Ml5l(9FJxr(CpwY5_M{f~N
zrLd0i@`c*o-@!J3CB}IRDs1GF^fw}LvGS+T)AygGr$O?+ZKs8z74O<0`KtcSCpul)
zAD^Nh^(jns{d~+NE!NT)hz{?LuJHXmu$XxU7u!tbi^d4)|7Z0P#XQ+;hQ9AeukU85
zYpofrMX@k)CA;_Nm6CIIwULUhWcse1Z?L?3mmAI9)(8<*>Dgnwz6+V>>tieTV|T}u
z(vXxIj`04G{Nc00v0`KA3G~_%-07W2HZeDA{aos}=`#mManJ%m>O%-SSFp1OkA~+Q
zX~w#zzO`rmxtWMnT^hpbB8WgH-^4N%YkD(mgkbgC#!FAqTN4&fd5<FA&i(q;I=ZV9
zJ@!sDA$Ai{u%xY?8few`P4d(@q&*w^QQ*x-4irzhMgq21m{DeKFGH4d6z&E7wZ0(F
zV?-Jf`ExuE4clMpYezhK$$7tF(@-pkJv3bvqTWIvcwk8=+`PC>PZ|^?iRiI~L^NUL
zSlX}ENZAAQZfqm&fCoK}=JjwYan=&7(hA%o`MDd_=VBor1xyTtBZ4epzXvJ+l{yxX
z`)#)D^-T8pxk3mCtyEgglU9MBNTgX$YUR7Skgpzy@U1)GAM8jdoEO6KuO2BvdVZsz
zNFQ{)LC1Bz^yqwvTp%NpJ!!K6kF}Ux)sa#MHmL|3kXlvg$;2?~K-dsRL^3>J26Q)D
z_ZD6gG&YA$^98ZVPq0Zlj5|)B#KuuG9nVOw<Z8-evZ|LOXl`?3BX%63vR&FZdr$(?
zMiPGFn~|#%jIs}Iq8E+Mb}8pen~WCnFeir{h?F89M@sl4Vfi^&P66%PO0taHsA#go
zGLdSArj5-7Rp+=gg`A&h(8C+#O}U2_C-O3hkfnQYO8sPWcLzr3`AuxXNm*?jdY%Nj
zM4j{PO6>lY=qZ@7$7aMtJ5VELx|^Qj=Ee$|r&Z0;ciACLTu;Gj72q#O^j07<42@{y
z=<A6AfgbMMg$Bm-y^ne!lmQKUl$nZDomf?~p5JV2DLJ?4ER)sIHd~%eZB8NA#mzS%
z&xK!4KVkoouJeY{+0Mu0zMkO-YsU;RuF&_S0gNHXkPT3t>0;cy<>35s3?QKrt|B4I
zoQTkxuS08g3A&ys=8MGw4UJ$;E-~2184iuq)hg;331Cx`pcs6|H74M+{v05_<KrAi
zMz_6_l3?i!gdBAa>gOOCo2h$t3JFutp>>n%Tf3;P?}~}=J;(Z71Eu#zOf8T>o~6c>
z7oUN+Hohqk(dP@FnI36u&Ga@Vr{`hY-Q+-N2mq!v>+Th+krmk^$Bu8#OXW(!+>KhR
z7iGR*%4w~#v>6-Hn`>)>HSnLu%miuZ3ci&Rn>=Hf^^6^X-l0arQ<`G}mG0-$VUJcY
zpYb;Iy;ohEi$)uh70;^V^%Q9@uV*kY->-E8GCl)lYN)<FDFLB->lRc}s02iB(YZpE
zc5n!d;1H-@zMqgQMV(@aEs@xiXtsc80D{h~1+Je->@zb#j7cB+R0OFMY*$zj4$s8)
z8Y4jhE??<WLtc5b+rR~MH~6nRAA6~}1*;|i)~ufCTV~d+_DU-%So6E`;H)uRx`vN)
zO>h;vN?fI`a@Rc90+-JfbiqBm_itFA{be6KM-z<u%$MlJSQhE;t6rkR5N*}(x4Z-w
zOsR4I;Y;*F`%D+XMjD?W%yh-y6w|Z|UZxw}h`*vV56S42a^9vMX@N^w%}-6pV3lvv
zlFUjMnQmq|bY}`92wKUWdz<ERH__X4qCMauZl2e~W3hPQVOJfb9<0oCu&WMYRob98
z%U(Q)el@ZG!KGibixhIlbjdR8*a}61H2~XW6!X4AXZHanB%u)N#%7>IY;qx?;+}Wt
zRfy@l{|?QYztWYwDp~I8wJKSTYJ63Ka&x+N7~PHyMr%-3-(Yk*fSdb^))<A?up{r%
zA?%iS>4lcHF2cxr@B}~eE_DoS10v>ymW0AfLlu1Ym|=nO2G(k*15MV(>t4X<Avz0-
zn)!!-o(=5QL-eA84X&Qc{*@Be{e>9aCbX*m5x2Mm{GeA?N`>&?ynBew>ATKF@=HTp
z=0mZmz8HwN&FJ0@@6)mQ+k1>_rz?42J2~8*Bim_I{rG*l10L?C4`>d1=I^v1e|HZs
zdpMYV3H<Cy0<#bI80;RS>i7@nSger%<_2u^dGiDK{r0o(KcKmJ2YQgDy*F9f4L}`i
z`e7OcFFb#k)?$Hl!H3juJWXSN`H)h}ItA#TgWRY?F4pG@c-{`P0Y_*NoI1;o(n0Li
zPvCw$&dQI|2VBQpM4H1l^YbG(qNiBu$21$?azDYs^8_zm$owDET=xD^Iu0u$z5d@n
zrZdg_9iCo$rgulr7#-you?&eAdbRz>=;b1Dv)hi+Y@-iW_WmbylcQG$Mp@m-jHkMR
z**cT)R37ay=~JiURW(7D{)2kisZZf2zv~|~bCk0OjMQWpsl8yNnlPq(iIpq;h)yj?
z?@|4Pj4oul%|4vacM%!8>oNfg#LYATZ1{-I6|>#!%a7=QfjMsCxgHr#mwQH6B7qDg
z^(|QWLav+b`xuVKiN|S5A4Qr&^4tk?26mg9)gPzDVu71IeVh(dGa|YOj&ZY-c(e$4
zdgTG^!J~A5IMU6uqtwH%t3Rff@k)SPGipqa?&l?sKCjp4$C(ftKA{uD3Fr2$`2X%(
zNuqDXCM3|JKl|)cY8TPl?>?nNyd^z4RGi$Q;$9spWjnusi=pZS;zBc7+X*`Pss(Or
z|AWe#+=3%NaFB(krVeLtU=O>`P4Z1OJMWS)WAVw&H@F1cq{w_-Z0tqF7sp~>)W@<v
zqXRR0S4#_q8gXC&EB_3WfseI(Mssj^@H2YRRp%)T!w4rf^lDp)h0oh^M&ts%#K+Fc
z&uRZDz3K@wfgYo~T%^juY3J<cD+iLmXZ#}0Lyle-hEmu)pVJa`0sm0RonJs*Hv9f_
z+7G?=eSxtA*f@R(GVj-PSYAVqmYRCE)MT{O!~u;gXzoF`7EZV22{~v%$%HH}H)^$*
z43Kw{UYDKiCT>m>kOb-SzCx14DcD?y*=o62?%9*{5?cYBeRKItwwQ(gMf+zZWPMc<
zd_&e(nbjbt6$NX0bWIo0wTZ4Ji|AUTYirHA5Yc|@^p{YJZEVL^n06+AMH?{f;QlI1
zJ3o9yFUi}`14)~DBWaTXbCUs~<!kD{eoK!!TYJ~pYSiHuB+y_BX^#rqdso<QRM?J=
z@3*9}8J)0L%Gokp#GU55-JP&#+SzBFbOtV${gdW;clAKaXb^070xO7!B<Qu<fW4dD
z@lX1E>z*EU@{{Z2_o}n6N8xtP5?w8{1ME3X@K=(u-`(LB_TyGO;1&+Jm8sFjs)mM0
zRbwRB5M=HD!Zg^yzWNvD#u9enH?-J$utzJalN(vxtBpf!-8Zzf?_tQ}Tx>gs<zEJ)
z6-L!z*7*$$7ar>Y>^Sf8$pli4Cw2KG?jdFTm3YF95Xb$ez#Au-{4KqvwG%+PW%N{U
zdYv+A^NV=KE#QZN7q%l6u}AZKEMR%VkV+a0-=c!XNosO4yr7{91_+70y=FmcD%!!l
znrdV~+rOhO2Qnh(wqS|ZAPGK}`yHn70Gsn27-uWHjbFC2-+f1Mo)dfjJH(hOS@vlJ
zZ+5Xor>PfqNZV;1FtNoa$?5F9)0kg!Y&eLlA6rY5y!g}DEHJYBnWy-t!kXTEhL(sW
zwzv;sb@mKJIwBkUJ(_4>wckU3H?jM_r<Z~`-}oMp2benFqfZCfupjVkBwO$UO17{y
z{IZ7a=9dlZ#1CNVoov9raoNSL{x@D8WVieq)z7ePxF8LMy@$6*Fk$Lh3@V$AIZH1B
z=Tx7C443jtVG8!p3>VTS2LiP%67*%~S+v{02K|VFYgsA3>}0VYAxl;#+wvpkPNy?|
zg(HXk@FQH!*)07hI$F$g#&<Xdvf7_$t~Aa`o-p<*Z2Jijq*7;mloK)bpXlw<0w*$|
zJ(Fnqx?7dOWW&0R$|$mF-5bhSvBMePl9J7CI!#m9D59ibAQOpl84tFZcpZm9y@prF
z^!|w`mrFBK$T#aARtB@yvs7iZqH-}J8S#8~HrpX8LlCuyH@UOe*P=2Acyn2lLa4Fn
z7G*Rv*m8@K5A;1{Q6?af;|u<Dl4VQEC9vGeB&9?;m13Uj#{MWNS)$X$-jb9d!_KpV
zBGW}Sn|bm6#6D%V<TTCfeXiR`SJ!3>eJ$R-WkO$l|8M8I6}W!xTsPKumJZ5HobuN5
zNH@%a+P6Ffk>qMFn{tCl^0bgcsUyfMde^C3PR3|`Q<QrMF#21U@-o<Bv0K>zLn0$p
zsj`$SrR?TZC5QO6jj2i+Q39vuy>m4FQm6K0AEkg`()_TmvH((3n5JA!g4*qA%I__>
z=4U7m5(I|c$i&y>+Jys@EJ<CbAg{$oJQeK4f2irKdXQ2HI=nDQ*@z6b>jx_jN@T0{
z-35x<g5a?-Olc<DwcCa%GbQle8y73zV<GU`2;~W6k2Q`|MvCdG_V7sMkVQs3TabK=
zG%FcweStDr9I3{?LPp0|1<EW^piR6)nQ28H<6W02HzVpt3zZg$%+yvEDUT2&It-nt
z3{**_Ht9;mhHQQ9>Z=u*4`I$Mr5%wjEn~iN1VhnIU!#PHrCDv!Zd{-|EaHuAp^|5@
zELT@*W2=;OKHM4A%6IrM)u%j8$SUoNMT(yXp2};LDF}eBt5x0rfKAN5SXqnG|6Hs@
zkT(>lQ^s1`)UE0YmSa^s+TJ>4HX++I_l?Ri2ic|l^CqP~A-gr(GUYD>I5^azltDFI
zbhC0BP_gZ1Wj;Cdr(1w6>oHY=AG7lg)ytl^Rher!slLZSj@3plSC$jI)g}q<1;Bq@
zfNgo5WU#kyQ;v(OO?$3Yc|n0DW4EkUJYuSiZCb4?7t?JlYYoPetu0)u3?Xdnok}xQ
z9NT;?^xaE$Dmh}VSu!uVBqPvN-`uTSfE0#|dz2eMfu;8-X`*GE?RMUq9PQ~gWuFXP
z*mS>gnOuxcNw2e&cPMG>rTdkIV8Fo}lx*mw2^*AgpwrR~$}JXBuKjDHk}4v5+xoE5
zk60GiBv$aSvXlLFvvP-}GV$bmL|LQwILJ3Wb6MjfN;(_$h@!IR9|5s9v8vw!SwZbj
zzg65K)Z1%Ym19u3+a813XtuG7UsSKeIUa)^SLRFB<pAPfe|}NDm`y7M3g7>|q7tzU
z`S$zNJ6L!-I<b!ZYC9xmgLc(Z3QoA%%C7m7@-YZBQB!K6!nbOm`hM+%rYsdfDBrWn
zv*3-a=O7VKY&Sg*O>>BCdtSK|m&4C1GqB>kXpa)Zg7V=#${koxUi5-;3OdR2qVf&t
z)YA7V9}<z+*{OZXe~YS}z4VfD)RJk>W?NrYu0dY?*_V~u38ecM?aIF?8L71%P!13f
zVdh_z>%sTi{tEuZV`c|{&t#8wfXgb`Hyz5QmViCTE_w^X(!g5YQf87S?X|Z65-d9S
zZ6yqr4YIbkl`Da{LvJgWSfENB?<vFKAQ|(Xasz6w($>GHyllmsblr#Gl(lU6hZxQ}
z_U8|kZ=quM9f6wJ#Lga37DJ)~N0pF>jMBINN115FjMev3<wAr;$A79^0o>jEsWNWV
z30sF9$8V5qD_Khfa?n0RNGpqkm$eY%qGa)>U?56}WEBpv|M^sT1?O%&enL4RVS;G;
zQn^q%YbOrvxi6LbL_Pt|?o@8%DVWc7D&xge2m7v584H>B{!<C@6HYe$OR-t9(jB>i
zyCT>aCHh}Q{|g74b#$gfd-+>sn1q?`^Y1a2<ghYP9R|eYv71G8mSv1%90N_x0L`uc
zR<>Ee#Cf86;r~(Ya`8{H8nS@Z!>T$MG`UAr8^G~rRkg?pP9N`5W6)6SX}222WpJwc
zky7aZZ$IfVRj8*A8u4jY_EE<ZDB)l9RgsvbS<=*I!fBbV4wRc5`zv=!+Lm-R*q`NF
z6{^`rs#%=2!@OYkl`QO4YmpK84!;Do38U0bPRLPXR6hv$n=z^fQ|5pobsgxrqeyLG
zuZ&gu;8pPi^;fL&GSvwJ&z`7`V~<{@ri04QUZ#$NQadzJy^uOYQQYd-#&QbP(V+bN
zLiM*6=<@F`$7pvY0ln9Nw-E^4&5lk`Ltq5JoD0DKxWVkDiE2N%WdQSH%W=mE6CNwN
z0>kcPORrGhA!oE%lhk2W#F}p^Q46@0vSTpm&-Rw6!$sApeO98{2o`mxrm5G$k2&i~
zwGfwIU#Z>%Lk5{v>Oh{<mo**zOlLn{r4H5_r>nUl=)L|bbus9Ocb8%Yz2s_jI)S0X
z%5qS!ZH77wg0_1GPzR;wnWdIs8m*nB4u^KTXO_AbZIsPcZzUz#f!S&>g_}WPXSz1?
z2K8?gi<6vM^=fdV7OGWmuz<n`f+`a$o$Sjy)XA7Nx811zQv!uHhSl3I+TgrQfPb;U
z7g-{ZJee$@0@>s&6h_Pmhhr6bL>7_VkuqfZsz$ZYy2ZKGX=s18r%`<n`e;FudH~Ea
zETX<f`0^pDj<p5=<siE#rj8O1ISsYnLzxdD))JYtP&WQfr3oZhvQ)hrdg}Y7>dlrD
z&Xa8EGW9rKSGK6X040vLsFzF2X<8ZEQAN|VOK(<hL=u4Z&@JkIPMNxu>f<7^k5Yf3
z-i%r0u3xAFVTEW<{8GKf;>3AL9wRfe)F0FwzgDMOfXnhb(Dh<=@(y(hDb)(@RDB|j
zQqmj^>OG=~8OY7Hd(}Syoz-pX<*=tWwt;~bF#COKyVR_<QC7KDxrE7awNFZA3Rf#z
zWwI9I_OT1&>L|+w8qltdt7U|&)3*Ia&4oT-e_f~kLk-$IjV+^m(HeGknp!M^dwyK6
z4uwT<0aHH^$+q<msK0|M{QU!J0rFX5d~VBT|9(Kd9pba{K^3b9ZTo|2od|W>?;&*_
zQkb>+ht*l!f&1KMbu&n?a0>*XgWbAC{Tt+=>=CsJig3px>Rsr{gx{(%FFN&6^(C@L
zqg&N-sn3R#X~Iy%k+5Qc2NFYR*KXXV&ajX}n)amn2JiaqPpOL_f1f<1*1?J@ds-a@
z&?}x+X9M(`PpgY8ds6nXF+0>dU_$KOf$kh&uAM3pZ`sM6>h)lT(*IJIfTX+rOMM2E
z4*fxW-+7p(CDCTyA5|oAon-!9oaovoyVPPuI)y`(*oYU@f!Uqd>@!1{K6gp5aasV@
za5QS}p+a|6=6^wb(CSnKY@?B01hUVti(XWxi)T~V;K!9Bw&O)LohORC_M$o!$sw9^
zuUbry9im~@-AXK}u<-7BN{MJUzN~&BwK`p8LeYF2gjU;<w67i3V7buCN+-SExDy#9
z;YgHgc_GyW`{+>*wqWtdN>>nGKcF~P7YIf?)#ghqnXA1*D{R?Pehw!mhH<h(2pdet
zd#*wJ+e}$#j`9=>oH!J2#@<65-qxr`y*<_DLLrQEDf#he)yQ25#k%q%02yH<oC1Yd
z5<*W1#_@O@;$et{!NaK^J?AOuf}wZOd@@5`D2OAj5(xzSh*O_&tZA8^{Dad(!x4Qn
z?Cv?r_^BCs&Vqr5bl^c+oxnpP*bjgv5{#soYA=$NkVd3K(rdHOOl2&B$2gp#n_YAv
z^4CXWOQd@}rJ);<(T<o@(2vmuYlDcj@qi})n|nUJGxRhABTP3fVnjb)aj#f^zaW((
z<JEj)o_S}cFz6=(Vb>>*<F!Wkxp_us5y8_>LIpK@<tXqbb)0)irt6%Y?WD)PXSaS9
zjuE{Df)r6~R<k*Z7V<O&#3VU*BmRf*a%&{&P2-_Kj!V&<!wWuR7UYCRAQcDsHXGR@
zylTLQ<1Kl>kTVL0<JFS}rr>-z9YQW3xP!XYj>`Nrz3x-?^bjwPwkPU(f~`sCj36AX
zJATbGG<iT5%*0Qh_9xCZ;SCNof~z=8OUUN!0F!z{{4`3$Kd(ntN+?iWiE~l;Ib0w(
zHjNt)uCGS)*f^IMCo_Pk5nn4mxv9F+JA4*`t{e|YbKqMh@SkHZ@PNz!<EZNEtBtQX
zkr&j$nWkNw5JF7@6T)V&3?jc=z|f+8(4=u*GVeD(=FkWW`@Li)C7FukDQnfU`!R>;
zj0d)h_#K%B_KYPu4q7pfF6w5&5xjk!YxH9kjW9EB0)@@I1RQka6($&fx?k}w7*%)*
z0OZu)+T2YHy>mc^F5yN}L?Y(RpTK@zGSYSK3GSrlf$j>nlJSFgVeIOxmySiAlukUw
zx-!vOUk8mN(RsO6pw~>kIVUJ=WEL#OVYeZVo<C6P_ZAw5$e25h6WpDF76RvMfeVe!
zKe{GDo*Ylv!67tV=e5^{TO&AFIe^136G+lCBYL(9%N{d8feQidP2xDQQySwMK7C4E
z0EsB!&~<@GcrG%E0+Dg9)sytMa~lISQ1jeVIl<*F)ZbMsGv5@u-1+*OD+8fGG}sy~
zag8>L%tY1@vesfPrLNJ#6X0tC<*wBu^e5wtnR%|&Wm9kp*5ZJUl!|5MbhW^>da7P*
zCjM0d?vbC4{p&v0N;2B0hocqwk-9<G>XF3n=2=UQEDT)cy%fsX^!;iPZ17e4VQIFo
z7xt@vz<Me2XLTxBscC;!w-ZZ~YZY7khWb~_I#)9rcR;<-*5J}z|La^^+4ckK$+T@Q
zCAO@|HC)ftse+HTZuEBTslTXGMAt6YQ0NT94%kn3|EWX$ohY}v_Pac(tnCN&V(ssL
zQ-@&Lsr`6R-C-vI?dfBxMZ_%shkvN8(m|K>3S0J(dI4MTkvagAe(WRlURc{GJWad$
zxcZqWAMOe3Bw=$uQ!8)+;uD|2KCzbCrBcFXr`nLh`-R$sRdmxyoEmZH3)KOu;o~o0
zU+Av}xz8GdDs)GE^|3;PH1~F@rR>v_FsYnwcJY_$FT`}W_Vkx(HJR=yGKPxCQ&l~1
z8xykKQp__az|Rgf)P`Qbg90ACfH!NA@h=NVS>`FKr4$jx+^^MVllvqhi1^7D>Ke0C
zspH&6kVWs*N`f#RdUc(p)ZNUo|Eb<0uGD*Wt5Qn&QH;_WZP35e+sMUDE}Wwpa<{pK
zso2>VX{o|d^+*+sA-656IGUHm%mVA&0y}bA9i$!qh8uue*sSkhC%3WDr(yZ7)1E%9
z&VvETtUsu~vVdsX-9M-|iWxiIMQE%Fk$>cwVMABARww8LPS6or(U0nbCW$MJH!87V
zFMv+s2Z(l5vb7NDfJ-T1<Q+AQHOaQS$*%R(HWs<Ymr>h|SX4biZLg;6cc<yVJKP7|
zY2PrL%{D;0SFtT2_M|VGY_mPXw>-RPw@sE15qO4`I&9f&h{JXP2t3VUy9O)nha9$5
z5)A*LskYl%drX?rbgn+D5*o0IPbfl$<95%LI51dOBYY~u^k<aNZ6sd9E4*pUKv1lK
zDC8a|x8aOksOQ>d9K@D5X;`11xbAJiflk31FQ(WL6;MtQ<DbRw6ql8DO((jaHIad1
zni*_Ms%@zKta~`r+~nq9y`OdVwPk3ZrrQ1rA4{sXyRYpAi)c;NGW*-c5Q~~B{D1k8
B%I^RG

delta 15939
zcmbVz3s_ZE+VEccoU<<Hj&i?)qM)FlVwj+!Vv?ewqEV6_;iyNsUsOsoN@ldl)K2tD
zW@?iaB`FncZ8D=`$|)==`-X~=nX&o&N)yW_E9`ssK1V%d|Ns2Y?}KNpz4p7_cdhrn
zF8fbE$9El$XC=;Rhpp4z%D&c-0QYygImGY9srvJR<hfI<DJ0)rXiX;t?tRvqiNpPw
z^(MK7xy$Kf7!k$oq}E+zyGdWi$YT(rGlsa2*rMcm28W|~fcv~{3fbx&ZJ$DRy36eo
z$pQC4dz!wPksZ)pXz+LcVh_=`GV&5^Txp1KkN1lpZSIAB$>gZJ!EX#{cOUV~kWVpi
z#PC43Wl#v|b`KwvOwPG)8<eg$aB>j-GLwe6*BboXZw-pj3$?}ioFVL?!QsT>ju<@T
z#W4TYC^GNG&arO_m>xr8UR*ip=eJ3&Qb<WUA-T|JBomZwM*Ocnz;Hn-y)WP+K{jH&
z{RhM2*au;c$|MJ#vycafMM<*~XRH#JLK3stdi(OS(jpF-Q{)h(;x2MD9p3ng%=qIc
zM+p9x@xKoL>y=NA816H|rjv$PIhR3i5etQ5Ck=DuJO-QI)d#}R_YDmsAD(^RuwO3l
z0_<JEgi`*2VZJOo7$mJ`VesXLhR28#Hhg5rOmQ-O@O)=gO>s5XFC;Fe&mt9!a6Wg9
zUUD%B|CzHYEA>(Z)`^5o?$JTPKtDDNAtkUZpZde3j}3lUpZBpLj#R+fj}4=Oup=i!
z%gd^&m%2(<l(`tOXJhGpH7pcm|Kj`EZx~3e^600A0xH)r*e@HRm5^RTutk-;V+RXU
zz9i&6J!$YVy1|#xo(B)3y>*Sz4IV}}sEq!DEH^Mnc^DZa|74g&wkmUeGVCQm+Zc&o
zR95Yr?<_2<ERu+SwWGMWwn5SVVi-i^ZQd?p`VA@ioy@#M)rKJ0*l%!>M&)1qhUqeC
zY)hbzV4=J&)18DgDf9L85FvZu9Y!Bic2Ihg_u)g*2)j9b-PDMj)AN1%q$UQZkB~^%
zEU10(9&DBOGYS9Hx53f@2K)B0NToy2{j%K5;CP`S0vgP8J84k@EVNrsTHsqhB;Lwk
z!%D+2Xc<IrmfIM-_Y6)`H<<RxM-fLe3fRw|eoBrh-}uunWcYqC4TCd5bO>n&5ln9*
z9k4u@=3vntLIa>9nEI0w@M$pp6X}8#A#_Z_3Dk+X<<+jTQa$QKC#r*ePFblFb-})%
zti-98Iz7tJsUU_xV(G$1ogQE7CA78+>kOgFl~B5pl5S;13_U_g4@AY&k37XBB_)Ag
zF_T^;c|83$Cufx4$&?t(JsKkf6HMxbxaoAb{~2tN&oT-BkVS+w5$PN(n@&>=eT=-7
z4m+MP_=7!*hQQm?X#%+nebeb`Pca#GXVEQkAA{BzBmi!nL1W1!D40Pv%9j{q&ZLPT
zZlUu>Tplpf{u^i7kF-(k`@M)By@d`X1diWA@4;fqOuCL3;K)q+sw_B|pEC@FN3-cj
zl>A?_=~uD^E&2@>?5=ahhGnA-%uE@dERpEKveN2GM`88VCigqedGPhERJh|;jON!`
zSFGqU*k_JO9q*pCGQo1SS(5cHO>-Mv;qE6_Mk}XprAdV7z&e{IxSv`T>h18(s>JJf
zbgnrb><(9|_x<Cp$u<qO{(*Uw&b6+xnkwl1fQ7on>M^n(cW0~~r*(O;eswarj_LI^
zH68>nW)_bm0oR5^cy-y;2RE4z>TWJs^M8CB;+|K!91h+}{S0>JdS{{gucc#AM6WKQ
zc_M_P#OsI@0ilcO?IZ^F-%bM}r5LU^Pb*ueDq|c+CBq*{`!UE;0z9*rE+I*<GoJ=R
z*llzMNrC0J(P455*Id<wd>W9J$|W-iE-kApaTKR=qF*SH#IkZ{r9)NnYfVE-9Hm8$
z>axmO4~&%R!D`5-=}{$&q<M@YeX-NEa#i&tiC~BIORLM!iD<qsJm7bLqzNjL5hMd%
z&8MMq#<fw+B$+VbcA8GIVD0S)pv^YgQv|}_Z>QNL8)EOElM=G8&p$_<KmJJCkDSXT
z{6p)g*W0B$?vC&eQuf_JAJWPB95ziLK`?76T_hK9$bAKw{Ox;m=r9M@FHlX@$w}}c
zXJPunNs{EykkrY^*oq>+N7%oV2Epl*G;EsQ17G3;U!p=`P~g6fqK4n#C~6Q6&8Oy(
z<un}J%V-d(h3A&h;ktTGAbdHR?)hcZk2E~upy8xegPP!=O9t0*lDw#L#RAvLQb%=7
zCAywEc*a2&lX`@yKzJ43gUhMI|3=8I98!K^gCVex=E&Q$nI{k5>78_=&m?wglTNlv
zjp}Uhsfoj~TZzB&K_T5s%zM<=W+^yY+RtImBIM<X1<a)EankWbKH$UAfMzf9Rv+Ze
z5V;Dcm%ftDvbJy%KHur6Dl4s$q!#r>Ftn_svBp-Fa5I<MJfjRa>P2+S2hmX#zS?+9
z1wG28b}wj$7xc0ZXb1GJqA9^Ac=%LDv7@xmNrEvBb63#`{LL<cx>YFBOczZsCx%Ze
zt0^sX6{9bQ{Vuu)T|XA-Fm5%9;}pzTjeeyYs#nwD=(QhPO~;a6_+T}ira!~UF6f_)
z5hrC09jZUe$z$&FpTeMe4UHz}G$ez@PLk54=mm}y&YZF$3>SSI{<(&ZkS}qt71Kx=
zX~vY$X-HEo<Wn4yUO`@MDxuSn=vyUp91^`;LR0j;++6O?G>5~jr8HhX!`+!b4gn~o
zG4fgNF8ygJd{K&?{hS96lELT~0!zziv`VYKj20U&t9<L=QoomPM0m8>BCcycf%bBm
zGRh!+%Qu5i`6iKIM+vg4I+$DqkR$^{SI|U%efpfT5*I2d`jL5MWyMu`Nx-rSI#MSI
zp(QpDUrFuoLj@fyECR<ugyg5puB5l?bO9JN-nH;Bc&mm+>q7A9!g?A9=CyPTiGay#
z=~6jDsFs@yAFQSRx|nMa>u7={PM~mu*J3tPS(OA+*U>R*ld7`G2|`NHI!-etsWh=Y
z#e-;gs(1bqeCD63bvtagOA}ORe9VCJ>uEgXtf%`(rh@e;dUR8x>u4AHB(f1@ne83e
z_JedVCbbnCX(CC0$2ZbtnDPF)k){vI6C}~=Fl)w-Lno65Ih$ylF<+&|?UGag8#d8;
zlS89!m+X=g_&szpaly8G=zL5mPv1l1ZYmM_=&1`Uoz5IbIoHo3l711fqx-7BZrYDr
zBP9GooQR?pD{`GccadICj~MH<_P2ti2DnmBA0b=SA`}kYOQ-s8Q$cqM3IEjbGzzIv
zKuRZOv?2GQ0yPOBuVA*8e;+lG4T|$V`aO5w`)h2(9<|?oA>p4&vr{J>&}z*hO=^ZI
z+c5sOD8<`oA(2}>Od8wfW%^Mcrnjk`%=T1Sl8&kv@%fmBr(Fa~?H=i2j+VXyE#nV=
zVm8>fou*)-_ThHw!UEgd0y+grTc~ER>0X0JE_ZsmMD3)v%3Z?!{LCP@cPG6Cjrwmp
z=^QlUS0AEL@)-d=57Agy@(^7*uq{qLC7>ddrNns$-QzRpZf%72e@Q*+knyS4L#@h9
zryx#^+Iwy~HSnwkm@1!^&Uw1D3#m^)<QpVJ$#|F+8stk}1qyzg4mMrZfTVsQ^{ciR
z3D!sHB+RgK9;JtgLAmlM{ckiJ*xy8_8!bkS?_1D}w02_vHpn4HDZ~i%5pp<8+m8uB
z1nk>SH^?zYm|~S<l_^iqXgz$jhmL_mWo$62>QhhA=P_-_dm7au1-^Nj7LioQeTJrD
zfph}3U<vKdP=9%X(f#g{5aq%%^q(@u@sFRQTQPPcPzq@3p9OzDNY6)R8_AM+j@lAu
zY4w6?N3}EE`;c9PV#$U>Ep#DTivD?w^|{LE=jnW%oM&{06@@E%TPY>Da8psbf88{Q
z6hN**2Sfi0^pC{hsqcD`Ucp5hyo9N{(=)>4IyNkN9NI&$$EUfBB<-rHq|Zu>UW9aA
zkl99qf=ZA_lKLZ87$y8e%^epk)p$_f+lJ;=d!2fn+8hq4FJru}hn$yb6ls8xmuV<I
zX>Xo<nI6O@tWQIeYE)1$x53*Ax`3VVy@D~L5yFqqN%|%u`52;(sC+s??YKzVcZ7~2
z`{Df~2(u059;L$#I4)_Yry7jaMzuPUG-)j~2h(q9kLPVNR`+|V=sctYkGz6#&Dso<
z$*<6x{Nxs+JF+Yg9()($S*wRdYQ_Um?@=DuX1wn3+f+tOAktBzJF_AT{?g0@INrl#
z{@Ar%i2?0C9op3nT{@`)k~?XY{|SwO`6Ny1^sv${Vf;kplusGq^!pfP{ZG;?jAgf<
zq-9vBa!u(mdZg6rgSSWPZa=G+dQ}Sect&dpK5LZDg5xO+h!|6`_oO~=@5?^DvCU_l
zcFA}{`gIABVfuAR8`QnxvPz@Rh&Faj(Fh#+fUdzs3>G0Uu8YnH7A7)%US-)@SCO;Q
z$FB(Z@_o!(p6#M@Ef$mNyCl0XNq#2iDv*Pf$PZ~E!xVE?H_as>u&0|QB!rlJ#}{F`
zVSEvY2gery-*(fRG4mbqDVkp#Ec=v3_{Et>JQ`40O?4G!shICW#rrg6Sc1vJ#U#^p
zaVDt4;r3uD2`e%_rD(7Lsb1I&AJ|kF`5CSeCcv7{X*4|a86AZ61E0|nk)d`@HA$He
z_BoyFff$cb?}^Vb#mIt=&*^MqwnhoHF9*VUaLBnDA<bm-;L#phX3ke3%#xI4k_wc7
zFX+t-4S3!eI?d!XNp_N4<Scg7!lqaj1n?CN$5s+PqtfLpEvkYG2V{R4qo6ubkDGGA
zO<$pnIpOZFXqwdr7L8eH|B5ct<CcpG9$_$e_)_CD*BW(327^=!<zJ)IsZ(};O%rA4
zJ4450G<bQl%?BRaeDrDSjq=)x2vJ^JVf=Y|S!`3$H<-|R)4oOT*{JOKmaZpqlgXX?
zPzWUV(H^vT>qWW^Ek0%li-BVoarL|({&5kl{{R@jLt$gU{Ei0W&PM)s=<r)%*LN7U
znoZExMDG~eqVkeU%_gaJz{px|9GMD=BWt@R5BaFcefPtmnr$neeow1q(=iQQy9vXq
z1{Toa<-rLb9(2Io|InR&C)Bxhn50gvPnRhe%D<;$15SB+bo=x;1zUfl8%4L;<CID2
zQS6s#wruLvdSaq-#soz_)2DIw#rz8<v1d(?c^(aB&M!1eFP}3tHnJ9dp9!}#x6Nn6
z<UW&9O4zLgw<}b+s;VVlHYrzRc1f1|O>oJ;z9K~VoHBnIBV!+D>12>Flf=xbDyJ5A
zYH*Pu0AB<`o{=RJi&_tdCyXpF%+E~5Wfm5y$*dZp5R?R0YAcff7-V9TiJzzADicdV
zW7d!yHnH8N5L9W*R#5XJJgvoMwg@h8RO4<l8;wRQENmziqbw{0i<uUdD#w|j{$Vy%
zPB6psLv(>kvOk+l<s{!H487%{Y(hXPF3+znMpKcN+^v%)D6fUGf5~zNqIp;64@V-H
z8h58gvVG<(bv)djZI-glo(wrMit)f4A2c~CnxjO@HA8EsE(kvTikg%=qu72T=Xv@B
z{+=_*_q+=zFnbKj;e)aO9*AL!g+s+$U`9**CWgI@`RYrttk|REX+b60$l5VStuUM8
z%P~81Rxd4eLHhzWSh;Tq`$k7<pf`a9DSsT!I5E|#@aRkJn6s+Q&>GJk#6YXn7boD9
z8`S!v(OWe@`#TOIZPRFU;qq>$Rtr8THJYLSGn{3&g;|x?N3eK3#?ZrKm_PK7Vr^)M
zTK(^<^_GzRX4Fbf(x#Q+*b3E^%lxupTw?F{z>s8={Q*z?mSh&|*K8h|SzeA2!BJdA
zTFjCeMvrCzusxX><66w*`b&v~7ddg)*HztYCgIaPRSA<~jrNDhtN;x?IfdOat<`*E
zrqXJ@G0}eqN7{x{)PCfnW(ogPm++2s%!Bk~3R?wZl5se*#;_o{!wlb~uwcxa>&CD#
zSRm#4kP~VryZxOW(nX|B4MR8SGGolv26pFIwi?r899SHjAIl;Kb)z|HdDH@DwYELd
z4UyvzYmWzh);QJ$hsGdHm7Ve#GfYWk0pyJ5?dnvPDW5efEvYQoB%d?Gms8lw`aZK~
zA*-}yvM3^7GQ%fRS(kj-49(NnGP&Ojrs*t9E#s%N$I!q#rn5Mm-eBRkL3=n41pQ1_
zY!eoB_7>SsmMj*t_sxz?dZs)!lP%TBeiqFZf&wfa-3_r^x2gc#jr?UcZVoS<%^os_
zASOA$B1KqmNgby|=dd;z1Mnwv(LCZ{{XDjuC#WFFQWE@Q9(s`!7@W(NVo{aL9GD`X
z%4LTzMmEf6qp)~sKDwt23mkk}AF1FrNgP2#{dOT+i4k&f9!tddxjB#BgXwSPViu1D
zS<Hq*-Xdh`z^2z0q5JS{nmpiXIM^P<6+U7PfxI2m4?bRuPD`T^4dZTO+x6KNQVjbS
z;XIdZ$2{LlDI%X45iCBRC1CNvb{31-c1=DThRazESzA84B{#?78~+f~Z}+{kktG;q
zatFZW-3XWGfy+~Ihr-I+S^l{E0euQ?>{H<BqZa7ca1+$w5oY}FIVmSX$u5h8e+b<~
zN-R=|XClr!a8hybz#Z)PkO~X&pH@@3)``2FxJj|Js;0cWtg`xgvsB^f^v5Oajo=y!
ziJswF?<~q5KTaai>z9_TSb-s>1{?(}aL|CaxMr=fxJNm|qOSKqORcpKvkN<81vf~{
zQk_M?!<>^kx!&SApK5ksC~45fEa7hcRybY6qM>X#n}e%?*Os#^^gH@OG~8{TH;W4y
z9{s_tLN*gUSZ^Vk&VC?8Y^c1`!<^xbUQKWE(ey@*fIXE;O<0rhagT~F44R7A3`BtS
z<w!`QUFc-@{RVZj7j=slb*m5R77cY1I^I?lH9od!s1-bAdq78yDv7IEBax4JP=LC&
zmD?@KuaDAy%jk_Rlwznm;e+pliY1rgqJ2^sn=E%}{cuOj(?7yRJ$Bsf)1_PO;-Z*m
z_?5G-soZOU=(|zNg4eN)@)-;4TF27WG9`sgoMgYgI6P~)(dC}C40O2~I0`Zea~J8H
zhByRPt!J@vp9LOR&t}N|7WjBQOIDvXs?oJ<YT_jeN%!vb%yU*QcNG`o-gsi+Do5!`
z=Th8bcV4!TSG@qx`n6#=wAHd2`H}?^??PE#w!r+mF!e#)h%*G-cd<XA2aLL#F@h@-
zEJGo40}H|pfcy<C%wVw++!}av9X7i+pf?dJNErOr223?9R&C-j>P&>y!#zLijWhRy
zl=W<Iq_E;_1FSe(^+yh|O8BSRAHImNdiL+MUEh!JfSQpQ<<w^Onn8~7zG}OlDRP_@
zE-Yj#FifkSPEN2wLmu1o8@KiU>FR=2bcj9J$J=Ri{-3>FgRi%fldM{R2&g080O7NH
zoT4n-!QR)QZ%=-hO$?ZzF#@er%JA0gxHP)wVKz<9vclY-3_~DnH#IBYKg_nv`fTgf
z)6&O*-K@{K_K1h4Liq()8AHdrcONFD_wL0+F%O>J%ch|d{(3J<kn=plf*JeRU^(CW
zplz)bDA;GHtaDg7yk2MsfX|*_E?f^~{h4KtbXtA=waa>gzjoosaRRQZolpLm<>*ST
zK8<~nZNTNz;U`%l`0rz5v}Ih45_5nx>u@{d-DlWGxMteh%mQV(9>sv?n%WTneEuAB
z$qk-iMqcvT=vM3XSi2SJ%9p6L&CBkcUQKNDf!PTuEy#@v2U(hERN;48r6#rQL-a@L
zD<$-KmP^oIX>j)*Vw2EmwI5=)1vOjAlxbxp<(OGw^5#wCG0oBdXB<Xn*`n5i;Hkr`
zRJ3a1vo{eK-GYjg+RA2w?s<H>s}*DIF?gYs#l{}9lH1hN7H844vP#@)ty(kHRjr;8
zR7=t^L|`z%@k3~vwinoLxx+iH6W(cc`b_Ht^u55=h)#7zC#+JJ;(U=!H)4AA>@jw?
z+-=oPNtIczu@MCGDOH_t`|*|I>|yzgRhjoXvk{c;vUZk6&U%#XoE5M+2rjj=Tjf5h
zf+wo~8WeF!omBi~YrmD)3^rl&vjx~fY%#V3n{?R<C;M=ypM8mbs^9xC>@*vRPNV%x
zbQ;B{*>LnLTK)Ocm<kB!I?bk-EH;979UPKmvAI8x{gvci_Lv=4L>l0~`!I_M@YF|L
zWYM_CG4~={hWm_%FQRLTfOjrp))NEgFS1FdI2*C6M<4301vW&(;_t9Wf~WUG-=RV$
zX(Zsi?~r^7e2Yb3s*R+nOQ5Xv)s?Q&Dpw)y-lcfJCVtQ64#Fwxr8IO^sW#+oiVY7I
zANw9xiW%_l?^(j4Oq;JUWY}&nh79b6#*l?@*Dri*)@PAygw^hQY)0hX>mJ!2;B4so
zfyKg>OPEyTz@bYlHZ0dhX6BWyDJ@%9y0FqwT7?JMsDvu!92Iy7*nVKWXzN(7Kr=7=
z53(g6w*H5WNXfSi3^(<9^wMcKs{*8^{m2fRgn#IP>-A~4zWUdH*!;nHHj<oOy2@FJ
z8>`bD)hHmBXIN`~WaE=d2F$0z<~yMZ74N_qRY2E|Xks<6@h9Ao=(x;Ml4}QGs8cc2
zUu8+14~BZI<236$U2pk`jmKpgqPY`Yf6q^tS#AaPGmA^uHb4>$H%g+xgJ>rt|AOl?
z*Uy+&JK@C77(*J>rXtVNjM>3>Q(%Q_<yKB^o9bBZ%qlM{Ts2K=s_~p$OH$JST6=D!
zwZ}tikB8R8evFs68}loRf>}ClHJJ7D(W^Yt&z6wZ#{E3QcECpT^Kn<exfZ!y)drKV
zu!vCy25_iZ<<Kgx&NTZlXqCpFW)*!T81+0<Zn42IJ&#6zbmj`1i2FPEYGrcUfI%O<
zanMISgH{W99xnAXgrEG17}|d`_72~%cl_SiI|hvXgzwl-+%WbN9#V@4AB9^}4TL9+
z>KxF&%eQ~m4gK*%Q=R)2!f#JLHK5~JicIUis#a&|b;a!VOt#z8^(mPzkb7+Kv&=)o
zdTk{74m>=Y7(K2)l6nzPyJnLtov}f^9uE$$$?&Yohezd0Hn372h9@?@@{`YM;GvpK
z;EgM|&dt{I@p7NmT&UiRxq_L?WiM2cfoI75J`|{`CBzQUk9Tkk(52(yzoCGxwcd#1
znL&9hY$iMa>L|C3GT4264YB*^4epOzGfj`)2$&~$FnmaP3bBBm@u{I9cD$~IE^%sY
zwX+Iy%4p0faUJ6-ih*Ug+Y=gRzm|Ar78O-Gt5hKKN=H=;>|i`BBnCHJy?JLIUZla7
z{;tyUn(732i}B&;t-oPB1`8|aBYqFKv<wf?V<4Av|4qM3E4Xq<b#1vb={g;Cs45-Y
zEU9tcKwq*TM?51^Z?pK>Z}8H7$QN`nY9R;Mr2xDCIBgFJZWG)N-*dh|gsAT#?6?z$
z3>z!Q*irQAn(v^%IVIrrGCn*ZX#ky+8|kEY=%lDPB5t;;!$!BOUKgmr7?V0ca1&I)
zWgreMN=(3>SDk8x9n1zE1CJT`?Ysx0p2}wEx`He3439=6nfRFWI7~Aas8MaDI=9RL
z$g_O4BFhK2M=P>CXkIq)g~{0iI%*b`b9Gd{7M0_fTaKqIT2vscG4o+^t{pc!0^myv
zw?Lbjm&kcO4Kpmry?iLO@G)3?j7*dZuD!0f!!dxN&Knu(^bl1GJr53+Tmw3k+}NSS
z)1kx;z3T4faVsB*x%byt$Q4@Kbc`znHa;7RXKg%gRLy|tk3*T%UY$Lzxvz?()`PYd
z`fdExsdWSTB>VPBzM)V30N@5yFtv85H}Sz?UaSq;?6%r>+NG^nmABcYZIIx{({Po(
z+>eh(^=tCunWGv9VA|qKcFPT9n}84E*@O3>5iLLyR&P?Qa#W!@?t!8~yew`10AvT$
zVYOVH_yHWn)tR?o9o0*1lAG<A51R(_5)3`HgZbi3t%&5h;kDf;jy6wkwUCe6CHzA<
zrZwZK`~dRmTu_}orr<Ld^-dZRZy!KL1@v5<483-D(hJz3(!u%(yLv@25H|U9tLc~E
z1+`Vx&JsP|m1y$kiKvKw^+&a6f{SXgA0h+zXv|3G1n{Z2!L~DiPsewk1Yopk0Yf0i
zb-8d)!AqU912Om-jCjjB5H<$#X|mt7dV*UpKYfgmJPFZH;F(`?5KoX3jBW=mMixZi
zrRxXS5ZuvwHV7%@!lyy#jq|`9%*U!{HNiXyDU}A}(2m0HV0`BXoxuoM0R3uF0!blQ
z)ImWAPsVKKLA7Xt<7(qk_%;N)_drA_Pg0LTW{2|8c(ws`p*S}J&7ph*>Q7V{PXlup
zAB2bVSkJ<-IKz-$E<6;502OddEgIp!!}v(cSu-iVy2ZW%a)bF`_fT^vF}o+5W1uXY
zhaldq;e4EIvA7SL!{F0!9!DY`iQwsSlEppR5&-W+ax*NA;OlYw()~9}D0~;e!*K%k
zNE~uKOo_xe*=BL?w?;r?B)&NcFGTX2a8u_hZxcM%*n41N6u(IyVfEf)hwV{33RC+-
zQG5|DnBB?t2vrhhNRH-#a*owqVGjo-f?M6Y?RL4q>Ta<I!(-7rN?&93UT=qwqIm=^
zMEjzViFFVg!!vM1%VO}U3wFk!pE?C^#qcbBx7B-b9pYnoI4+W4Ml6q&r7R_D2oE;k
zm5V#$`JFO}P>zk@s|YS*h9&ZeBtco8$hQ;Z^Tj0oCi3~^QT#Bf)ymO)sV<9$z=_cq
z9hWIzkLLbFF{PmJP!%SO;rXbS?lF88DNw!~!=KY(`KR%GH^GyT;c57`La9&V;d;!6
zA5Z6ZVG)wSA0yk8moxZoJeE-APvv$Uu5t^rcn#UFbZ7A#J!YZ9Z{<IcHi(_YpTRuu
z-C3w5{apEJ7Vp+EG%ZQ*AdvY34}!kAe5&jx+~4X0;MRG34hc}2=J6byDEvDQ3C1YP
z7x7v>Nl`w(oj*ZHhO%jiniOR!yPe#KH<^^DT%77ij?z-hJ8)w{sjB99<2+wku$Gq*
zUA`z#I@a;MGPXGH;t4vPLpYV4bv!`5r1A7d{vUj?cN2daXRO>(&x_RUf`gm+G#tQ%
z&HOz?P!Go&_%?*j`2(*+2mbmW_;^E&*edRUiXl8&3Hu|TOOPdNw(wpvvLvUG2jRtW
z#rX(7Nst%gcJX;=X$`yhM$DS^O?(mF=f0x}`GwnvU61kxyxRl44~m<V?Yns$k<SXn
zx`$IDpA$;p<NP&_o&Y|1isMB%0aKpkb@F8a$DhTK6Q%4K4g|deoOYqBh<Fx-EMW4p
zNXY`PJ<EUefW#8$K8W=EU~UT^f;v;x!tX>*f7rtPWnF}EvpSI&#dw&vQ}vonW)Ozi
zf4<D4@rsvT(!T?*9}@l$q3|VW)ORVU$w{zF;S*3QA1eHA+-Fke{Du3;_yNVTV?2=P
zGK_k7;23X(5y$!cy3DIjt6t|@@%Z54>-=r>B(Jn1Cvp_OH@IC!mk|3F??s)|cc2H!
zhvW`EQ!X&V`#mCEdAWm!=!rx5-~=xvxZa!mF5i!0>U|egy-qps9yfE;%ojf9Um?4j
zPVp6_`H@dhge}VSPxv|+x&G?k`70>sb=|1aX!l*8qQxBp{b%S3k3rgJ7??Yt;WJ*1
zx3PZujNeZ>q2Y6W32pQ49{xQ!rIdfczaa7%Bg{L^-<8i9A)=R`*7Y07Bmdxw@$B}t
zfAGx&zXa*|C;x>JKjqV}c_(23CgffWQP~#&Z=T~*P@VPXQ7usf)!!nS6!`U9RGv(j
zdx4MDWt(!K;R4z~E_{4}=a4)lwhyr*H}1N~%TN<@;G2v57UbEu@Aw!rRap50kH<R+
zJAUAIVsDqy_XB^^fS$YU7aoi5_3yvn#A+d~pZ|bPC;SSZDC*TA<&fxX=)Ho;p7R~d
zM0Z}nptDnX{R%HtzlIo1#3TdCr<96eXxWWaOh&ezqGE!*)7XanB@$sE+lYjWjASAb
zIheu3TclHwxj3uGsPK(h4Ab|T&X|-yi+D&@1H`j7u^LavA<!;vrkB(~+yf^!ix4QY
zix9Ntt#*N;ZBl;p6GmP5WmBAFU+5~YBHAz4ekGzejO#ZkkpW`39)rP*5TtE@=c2`M
z<c$Tkhln}45OV~a4iT4;)6ubFqO1f&h*F)JN=M^t{_oJu2Gr}n4Hv4f|DWNa1l9Yt
zc#&>EwQfuj)u`6y!>XkuLfUANdHv~`(c)#4;nHL=75&i@$>L^$XHh*V0uRZQJI9C`
z5|L|8l+1HnRYd)<C+5~TYn%(!%jQ)?%2O_k6+44b%=gX~x9FVaeJYW7_<puXM>m|3
zBUT`uT{)sp747{CL=lSi+XW&TEy2D}G@^t}cZgbu$`gaI>9IS+AK*xyuo&vh$Yz+l
zM5My6c_IJ<&Xpx13dSuGq3FiuE)pYjTg}^G!y+*b#oxL}{8@*=IPZ33K$8#V9Ui2Q
zA(QvO#3iECaNs)Pe=b2}$5h0r=;#sWFtld;cf#E1MGR1Y3^@fK7Kl$skJ5an7;eCQ
z!mdJ*f@iCc8H2a6!-_;aJEvj|AaG$3I-5I-L?3Qb&2x&|a5>cM6lqxe!ztFveP+0Q
zg$Ps6UDvG;<M7;7X;~qr$*2*1tHer_@xc{1%k?hdPlU3~C1w&#8K5^7jqaP(VmMl-
z{~BZ|dc(VmMHX)NwHJ$cTsi(vv3MPcJXaz%U_6a46)vlqS(I4<lxH@Gj~Hh38#aqs
z@+FgUVzanYhs&b)EqJpu6JFnf^c@yuTeB#TmF0gD56igkv*~%UIiSvRlZ1P8C63B9
z5=o#Wk4Y$p9S`E*^5MXPB2Cv|*$Ss06dz$u_R@CIi2?ZD9pY_*(eu`a#CU@PI~;|E
zhs0?0C`xtzzj5t`EJ|t^^c>_BD1pB=iU-k`<vk+q(sf$8;KN768P+avHR!~TTy~)!
z`UsFor(ohPF%du9TDMEo<L<3;xk;Q*#q#D}@wAM`MaBEXT^Kz6y-$SU!HuGOLM+x<
z>X<oto^}f&yC_Flc|c6pA^Q$KBX+ANC+%BM|C5vlnuS9q^~(9@#Ez?MK!egBLL*6q
zzZ^oc6W~9G(A+a%#bME*&lmWyR{1l0Of-JZh3m>`#ZE_QO*xvoY|pf+?&bg`TagX8
zery$^b&V`rd9hW@BdE0c7X_YAH^GRP#6MA8tZm{}Eb`mLP|O52wu#SW89&C_o`w4v
z>y9923V<FJDR=}?4VlAuB;0mX+=oW>`B8!Q4;B83SS6DZrQ%hQM`#JtOa2AWYZSrI
z_^Plg-yah(YWN8}F7}~#l;db9ZSY^mQFg8H-0NZ~KAGCZ1IX50?E-Is*1+C3#2a{&
zxcp6#s~=Qnogod?u9K?wN^xH8O2^w`whrUVpm)W41Qp}U_r!9v&8eMY6&{>E*C|Hh
z<UZ>ZbI}oxJSmpz_FE6Yj+5ekOen(M7juyC>i0zt5}x^ixC0gC$q&RDR0DgLID%?$
zqDy>g>1O^uQak*iz%#clIR25i7YR@OSY-0_KGu(u)z6MzfM3iwi=xXNl@9z86mO%C
zB1w##!>gOKrI`!XxXNcZoyEAY^InI=IT+yh=VG@(aEWxo(jE~f_u#o*2ZpI$IN2lS
z<5>X2ej%pIXRXSrFGMD$HCCANF27$%J+11NMak_I%M5XS)_GF;B4?#*MXk@LBA6=X
zO8PzW1i4Rt$r=xRU$Vg~E?c)unYPMVxMo4Qvk<$e%glbOJxyy{xZc}BZ1!ZW<rZhD
zv&vOvu#NLT<lwovqu5nlD{SN9H3<A7uF&ac+mfg~c|HjWfO+RcP)3Mt%XF<Z2S1F(
z4~iT!9aXDhZ1rTE2MoVHEOxj`5^P(N$TGWn;9?G6>lu=i1y7zs7oG&Y=g@skfY9^e
zMO6Be=f!k=rp^8&be$K0O5(TTVWLa1Wx<;l#QVBDTPig6iPgp=n_Z=sXLEwMD9-u2
zY`l7XiY<P5aarM-rMRkIHLgT4eutjNR%07lrQT9j)vANlDoZYjXJy)8+iHvUgQ#fZ
zNady<MXZb(<q*ah%D$h(Y7@y;Ow_28akX#b#!cw>w{znZEI#1I9T<oaGFeCW48(B`
z@0ePR3sG_PHsb^8a$~36ct09YRG=}$8n120EmcRIw%-Pg0miMKSCRH(p2?)ys~pAE
zY3(+c6KKqa>4S`!a;FXAgNz&StFAT9+O$(Pr6tI?h>RRMA3tVW>&z^7g@-Tqd;+5n
zcX)ONvssT#pF*PN<CTWW(kj?a4I%Jf{>DdsZzQ<C>N66*%f2J2C%89?L%Cfi?5Xfv
zpz&_>=EH-Gn~^&QgNz%=$P}Bo*jZ}NwM)}0YMhm|OIJ9ZON&q!T*Xz~T(iE~3y^1*
zpd`W=sZ@m+(T6zT^DyHXOyu4OH{OMbTt<X3PsU*Gjxzp1hk{YUqm8TO;0k*>PI@Vx
zzc?$G;<c=@6`Ig8RG}p*e~UHl_KLB<(*kkEbYC$pBuZ(5u@+Y!@ZTeh2hia>IMO&C
zbG?p{#?|Ofk`s-8HEp%~Yp8eH8x@jd+(HJlJUq&HMD?IK$;PR;E3qTlI1P)BlZ}hf
xAtt668}#U7UPv`=gwMwthmd10-(>6~a=TsmF3otSPVTTP3(}3LM0dh2{Vy%9#fAU?

diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs
index 7a72f8e26b0..440972c7a68 100644
--- a/substrate/frame/revive/rpc/src/client.rs
+++ b/substrate/frame/revive/rpc/src/client.rs
@@ -18,7 +18,6 @@
 //! and is used by the rpc server to query and send transactions to the substrate chain.
 use crate::{
 	extract_receipts_from_block,
-	runtime::gas_from_fee,
 	subxt_client::{
 		revive::calls::types::EthTransact, runtime_types::pallet_revive::storage::ContractInfo,
 	},
@@ -649,8 +648,7 @@ impl Client {
 		hydrated_transactions: bool,
 	) -> Result<Block, ClientError> {
 		let runtime_api = self.api.runtime_api().at(block.hash());
-		let max_fee = Self::weight_to_fee(&runtime_api, self.max_block_weight()).await?;
-		let gas_limit = gas_from_fee(max_fee);
+		let gas_limit = Self::block_gas_limit(&runtime_api).await?;
 
 		let header = block.header();
 		let timestamp = extract_block_timestamp(&block).await.unwrap_or_default();
@@ -695,16 +693,13 @@ impl Client {
 	}
 
 	/// Convert a weight to a fee.
-	async fn weight_to_fee(
+	async fn block_gas_limit(
 		runtime_api: &subxt::runtime_api::RuntimeApi<SrcChainConfig, OnlineClient<SrcChainConfig>>,
-		weight: Weight,
-	) -> Result<Balance, ClientError> {
-		let payload = subxt_client::apis()
-			.transaction_payment_api()
-			.query_weight_to_fee(weight.into());
+	) -> Result<U256, ClientError> {
+		let payload = subxt_client::apis().revive_api().block_gas_limit();
 
-		let fee = runtime_api.call(payload).await?;
-		Ok(fee)
+		let gas_limit = runtime_api.call(payload).await?;
+		Ok(*gas_limit)
 	}
 
 	/// Get the chain ID.
diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs
index 16bdd6d1a18..a19ed28dd9b 100644
--- a/substrate/frame/revive/src/benchmarking/mod.rs
+++ b/substrate/frame/revive/src/benchmarking/mod.rs
@@ -27,7 +27,7 @@ use crate::{
 	exec::{Key, MomentOf},
 	limits,
 	storage::WriteOutcome,
-	Pallet as Contracts, *,
+	ConversionPrecision, Pallet as Contracts, *,
 };
 use alloc::{vec, vec::Vec};
 use codec::{Encode, MaxEncodedLen};
@@ -1771,7 +1771,9 @@ mod benchmarks {
 		assert!(ContractInfoOf::<T>::get(&addr).is_some());
 		assert_eq!(
 			T::Currency::balance(&account_id),
-			Pallet::<T>::min_balance() + Pallet::<T>::convert_evm_to_native(value.into()).unwrap()
+			Pallet::<T>::min_balance() +
+				Pallet::<T>::convert_evm_to_native(value.into(), ConversionPrecision::Exact)
+					.unwrap()
 		);
 		Ok(())
 	}
diff --git a/substrate/frame/revive/src/evm/gas_encoder.rs b/substrate/frame/revive/src/evm/gas_encoder.rs
index ffdf8b13c04..8853e77e958 100644
--- a/substrate/frame/revive/src/evm/gas_encoder.rs
+++ b/substrate/frame/revive/src/evm/gas_encoder.rs
@@ -72,6 +72,12 @@ pub trait GasEncoder<Balance>: private::Sealed {
 	/// Decodes the weight and deposit from the encoded gas value.
 	/// Returns `None` if the gas value is invalid
 	fn decode(gas: U256) -> Option<(Weight, Balance)>;
+
+	/// Returns the encoded values of the specified weight and deposit.
+	fn as_encoded_values(weight: Weight, deposit: Balance) -> (Weight, Balance) {
+		let encoded = Self::encode(U256::zero(), weight, deposit);
+		Self::decode(encoded).expect("encoded values should be decodable; qed")
+	}
 }
 
 impl<Balance> GasEncoder<Balance> for ()
@@ -148,6 +154,11 @@ mod test {
 
 		assert!(decoded_deposit >= deposit);
 		assert!(deposit * 2 >= decoded_deposit);
+
+		assert_eq!(
+			(decoded_weight, decoded_deposit),
+			<() as GasEncoder<u64>>::as_encoded_values(weight, deposit)
+		);
 	}
 
 	#[test]
diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs
index 0e5fc3da545..09bfbf380c6 100644
--- a/substrate/frame/revive/src/evm/runtime.rs
+++ b/substrate/frame/revive/src/evm/runtime.rs
@@ -20,7 +20,8 @@ use crate::{
 		api::{GenericTransaction, TransactionSigned},
 		GasEncoder,
 	},
-	AccountIdOf, AddressMapper, BalanceOf, Config, MomentOf, Weight, LOG_TARGET,
+	AccountIdOf, AddressMapper, BalanceOf, Config, ConversionPrecision, MomentOf, Pallet,
+	LOG_TARGET,
 };
 use alloc::vec::Vec;
 use codec::{Decode, Encode};
@@ -34,8 +35,8 @@ use sp_core::{Get, H256, U256};
 use sp_runtime::{
 	generic::{self, CheckedExtrinsic, ExtrinsicFormat},
 	traits::{
-		self, AtLeast32BitUnsigned, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata,
-		IdentifyAccount, Member, TransactionExtension,
+		self, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, Member,
+		TransactionExtension,
 	},
 	transaction_validity::{InvalidTransaction, TransactionValidityError},
 	OpaqueExtrinsic, RuntimeDebug, Saturating,
@@ -56,34 +57,6 @@ type CallOf<T> = <T as frame_system::Config>::RuntimeCall;
 /// - Not too low, enabling users to adjust the gas price to define a tip.
 pub const GAS_PRICE: u32 = 1_000u32;
 
-/// Convert a `Balance` into a gas value, using the fixed `GAS_PRICE`.
-/// The gas is calculated as `balance / GAS_PRICE`, rounded up to the nearest integer.
-pub fn gas_from_fee<Balance>(fee: Balance) -> U256
-where
-	u32: Into<Balance>,
-	Balance: Into<U256> + AtLeast32BitUnsigned + Copy,
-{
-	let gas_price = GAS_PRICE.into();
-	let remainder = fee % gas_price;
-	if remainder.is_zero() {
-		(fee / gas_price).into()
-	} else {
-		(fee.saturating_add(gas_price) / gas_price).into()
-	}
-}
-
-/// Convert a `Weight` into a gas value, using the fixed `GAS_PRICE`.
-/// and the `Config::WeightPrice` to compute the fee.
-/// The gas is calculated as `fee / GAS_PRICE`, rounded up to the nearest integer.
-pub fn gas_from_weight<T: Config>(weight: Weight) -> U256
-where
-	BalanceOf<T>: Into<U256>,
-{
-	use sp_runtime::traits::Convert;
-	let fee: BalanceOf<T> = T::WeightPrice::convert(weight);
-	gas_from_fee(fee)
-}
-
 /// Wraps [`generic::UncheckedExtrinsic`] to support checking unsigned
 /// [`crate::Call::eth_transact`] extrinsic.
 #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)]
@@ -346,11 +319,14 @@ pub trait EthExtra {
 			return Err(InvalidTransaction::Call);
 		}
 
-		let value = crate::Pallet::<Self::Config>::convert_evm_to_native(value.unwrap_or_default())
-			.map_err(|err| {
-				log::debug!(target: LOG_TARGET, "Failed to convert value to native: {err:?}");
-				InvalidTransaction::Call
-			})?;
+		let value = crate::Pallet::<Self::Config>::convert_evm_to_native(
+			value.unwrap_or_default(),
+			ConversionPrecision::Exact,
+		)
+		.map_err(|err| {
+			log::debug!(target: LOG_TARGET, "Failed to convert value to native: {err:?}");
+			InvalidTransaction::Call
+		})?;
 
 		let data = input.unwrap_or_default().0;
 
@@ -393,17 +369,21 @@ pub trait EthExtra {
 		let nonce = nonce.unwrap_or_default().try_into().map_err(|_| InvalidTransaction::Call)?;
 
 		// Fees calculated with the fixed `GAS_PRICE`
-		// When we dry-run the transaction, we set the gas to `Fee / GAS_PRICE`
+		// When we dry-run the transaction, we set the gas to `fee / GAS_PRICE`
 		let eth_fee_no_tip = U256::from(GAS_PRICE)
 			.saturating_mul(gas)
 			.try_into()
 			.map_err(|_| InvalidTransaction::Call)?;
 
-		// Fees with the actual gas_price from the transaction.
-		let eth_fee: BalanceOf<Self::Config> = U256::from(gas_price.unwrap_or_default())
-			.saturating_mul(gas)
-			.try_into()
-			.map_err(|_| InvalidTransaction::Call)?;
+		// Fees calculated from the gas and gas_price of the transaction.
+		let eth_fee = Pallet::<Self::Config>::convert_evm_to_native(
+			U256::from(gas_price.unwrap_or_default()).saturating_mul(gas),
+			ConversionPrecision::RoundUp,
+		)
+		.map_err(|err| {
+			log::debug!(target: LOG_TARGET, "Failed to compute eth_fee: {err:?}");
+			InvalidTransaction::Call
+		})?;
 
 		let info = call.get_dispatch_info();
 		let function: CallOf<Self::Config> = call.into();
diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs
index d2ef6c9c7ba..14ab917c0d4 100644
--- a/substrate/frame/revive/src/exec.rs
+++ b/substrate/frame/revive/src/exec.rs
@@ -24,8 +24,8 @@ use crate::{
 	storage::{self, meter::Diff, WriteOutcome},
 	tracing::if_tracing,
 	transient_storage::TransientStorage,
-	BalanceOf, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, Error, Event,
-	ImmutableData, ImmutableDataOf, Pallet as Contracts,
+	BalanceOf, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, ConversionPrecision,
+	Error, Event, ImmutableData, ImmutableDataOf, Pallet as Contracts,
 };
 use alloc::vec::Vec;
 use core::{fmt::Debug, marker::PhantomData, mem};
@@ -1273,7 +1273,7 @@ where
 		to: &T::AccountId,
 		value: U256,
 	) -> ExecResult {
-		let value = crate::Pallet::<T>::convert_evm_to_native(value)?;
+		let value = crate::Pallet::<T>::convert_evm_to_native(value, ConversionPrecision::Exact)?;
 		if value.is_zero() {
 			return Ok(Default::default());
 		}
diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs
index c36cb3f47ca..7f4565a9f08 100644
--- a/substrate/frame/revive/src/lib.rs
+++ b/substrate/frame/revive/src/lib.rs
@@ -41,10 +41,7 @@ pub mod tracing;
 pub mod weights;
 
 use crate::{
-	evm::{
-		runtime::{gas_from_fee, GAS_PRICE},
-		GasEncoder, GenericTransaction,
-	},
+	evm::{runtime::GAS_PRICE, GasEncoder, GenericTransaction},
 	exec::{AccountIdOf, ExecError, Executable, Ext, Key, Stack as ExecStack},
 	gas::GasMeter,
 	storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager},
@@ -1140,16 +1137,20 @@ where
 		if tx.nonce.is_none() {
 			tx.nonce = Some(<System<T>>::account_nonce(&origin).into());
 		}
+		if tx.chain_id.is_none() {
+			tx.chain_id = Some(T::ChainId::get().into());
+		}
 		if tx.gas_price.is_none() {
 			tx.gas_price = Some(GAS_PRICE.into());
 		}
-		if tx.chain_id.is_none() {
-			tx.chain_id = Some(T::ChainId::get().into());
+		if tx.gas.is_none() {
+			tx.gas = Some(Self::evm_block_gas_limit());
 		}
 
 		// Convert the value to the native balance type.
 		let evm_value = tx.value.unwrap_or_default();
-		let native_value = match Self::convert_evm_to_native(evm_value) {
+		let native_value = match Self::convert_evm_to_native(evm_value, ConversionPrecision::Exact)
+		{
 			Ok(v) => v,
 			Err(_) => return Err(EthTransactError::Message("Failed to convert value".into())),
 		};
@@ -1206,12 +1207,16 @@ where
 					data,
 					eth_gas: Default::default(),
 				};
-				// Get the dispatch info of the call.
+
+				let (gas_limit, storage_deposit_limit) = T::EthGasEncoder::as_encoded_values(
+					result.gas_required,
+					result.storage_deposit,
+				);
 				let dispatch_call: <T as Config>::RuntimeCall = crate::Call::<T>::call {
 					dest,
 					value: native_value,
-					gas_limit: result.gas_required,
-					storage_deposit_limit: result.storage_deposit,
+					gas_limit,
+					storage_deposit_limit,
 					data: input.clone(),
 				}
 				.into();
@@ -1264,11 +1269,15 @@ where
 				};
 
 				// Get the dispatch info of the call.
+				let (gas_limit, storage_deposit_limit) = T::EthGasEncoder::as_encoded_values(
+					result.gas_required,
+					result.storage_deposit,
+				);
 				let dispatch_call: <T as Config>::RuntimeCall =
 					crate::Call::<T>::instantiate_with_code {
 						value: native_value,
-						gas_limit: result.gas_required,
-						storage_deposit_limit: result.storage_deposit,
+						gas_limit,
+						storage_deposit_limit,
 						code: code.to_vec(),
 						data: data.to_vec(),
 						salt: None,
@@ -1278,38 +1287,26 @@ where
 			},
 		};
 
-		// The transaction fees depend on the extrinsic's length, which in turn is influenced by
-		// the encoded length of the gas limit specified in the transaction (tx.gas).
-		// We iteratively compute the fee by adjusting tx.gas until the fee stabilizes.
-		// with a maximum of 3 iterations to avoid an infinite loop.
-		for _ in 0..3 {
-			let Ok(unsigned_tx) = tx.clone().try_into_unsigned() else {
-				log::debug!(target: LOG_TARGET, "Failed to convert to unsigned");
-				return Err(EthTransactError::Message("Invalid transaction".into()));
-			};
-
-			let eth_dispatch_call =
-				crate::Call::<T>::eth_transact { payload: unsigned_tx.dummy_signed_payload() };
-			let encoded_len = utx_encoded_size(eth_dispatch_call);
-			let fee = pallet_transaction_payment::Pallet::<T>::compute_fee(
-				encoded_len,
-				&dispatch_info,
-				0u32.into(),
-			)
-			.into();
-			let eth_gas = gas_from_fee(fee);
-			let eth_gas =
-				T::EthGasEncoder::encode(eth_gas, result.gas_required, result.storage_deposit);
-
-			if eth_gas == result.eth_gas {
-				log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}");
-				break;
-			}
-			result.eth_gas = eth_gas;
-			tx.gas = Some(eth_gas.into());
-			log::debug!(target: LOG_TARGET, "Adjusting Eth gas to: {eth_gas:?}");
-		}
+		let Ok(unsigned_tx) = tx.clone().try_into_unsigned() else {
+			return Err(EthTransactError::Message("Invalid transaction".into()));
+		};
 
+		let eth_dispatch_call =
+			crate::Call::<T>::eth_transact { payload: unsigned_tx.dummy_signed_payload() };
+
+		let encoded_len = utx_encoded_size(eth_dispatch_call);
+		let fee = pallet_transaction_payment::Pallet::<T>::compute_fee(
+			encoded_len,
+			&dispatch_info,
+			0u32.into(),
+		)
+		.into();
+		let eth_gas = Self::evm_fee_to_gas(fee);
+		let eth_gas =
+			T::EthGasEncoder::encode(eth_gas, result.gas_required, result.storage_deposit);
+
+		log::trace!(target: LOG_TARGET, "bare_eth_call: encoded_len: {encoded_len:?} eth_gas: {eth_gas:?}");
+		result.eth_gas = eth_gas;
 		Ok(result)
 	}
 
@@ -1319,6 +1316,29 @@ where
 		Self::convert_native_to_evm(T::Currency::reducible_balance(&account, Preserve, Polite))
 	}
 
+	/// Convert an EVM fee into a gas value, using the fixed `GAS_PRICE`.
+	/// The gas is calculated as `fee / GAS_PRICE`, rounded up to the nearest integer.
+	pub fn evm_fee_to_gas(fee: BalanceOf<T>) -> U256 {
+		let fee = Self::convert_native_to_evm(fee);
+		let gas_price = GAS_PRICE.into();
+		let (quotient, remainder) = fee.div_mod(gas_price);
+		if remainder.is_zero() {
+			quotient
+		} else {
+			quotient + U256::one()
+		}
+	}
+
+	pub fn evm_block_gas_limit() -> U256 {
+		let max_block_weight = T::BlockWeights::get()
+			.get(DispatchClass::Normal)
+			.max_total
+			.unwrap_or_else(|| T::BlockWeights::get().max_block);
+
+		let fee = T::WeightPrice::convert(max_block_weight);
+		Self::evm_fee_to_gas(fee)
+	}
+
 	/// A generalized version of [`Self::upload_code`].
 	///
 	/// It is identical to [`Self::upload_code`] and only differs in the information it returns.
@@ -1379,16 +1399,22 @@ where
 	}
 
 	/// Convert an EVM balance to a native balance.
-	fn convert_evm_to_native(value: U256) -> Result<BalanceOf<T>, Error<T>> {
+	fn convert_evm_to_native(
+		value: U256,
+		precision: ConversionPrecision,
+	) -> Result<BalanceOf<T>, Error<T>> {
 		if value.is_zero() {
 			return Ok(Zero::zero())
 		}
-		let ratio = T::NativeToEthRatio::get().into();
-		let res = value.checked_div(ratio).expect("divisor is non-zero; qed");
-		if res.saturating_mul(ratio) == value {
-			res.try_into().map_err(|_| Error::<T>::BalanceConversionFailed)
-		} else {
-			Err(Error::<T>::DecimalPrecisionLoss)
+
+		let (quotient, remainder) = value.div_mod(T::NativeToEthRatio::get().into());
+		match (precision, remainder.is_zero()) {
+			(ConversionPrecision::Exact, false) => Err(Error::<T>::DecimalPrecisionLoss),
+			(_, true) => quotient.try_into().map_err(|_| Error::<T>::BalanceConversionFailed),
+			(_, false) => quotient
+				.saturating_add(U256::one())
+				.try_into()
+				.map_err(|_| Error::<T>::BalanceConversionFailed),
 		}
 	}
 }
@@ -1417,6 +1443,9 @@ sp_api::decl_runtime_apis! {
 		Nonce: Codec,
 		BlockNumber: Codec,
 	{
+		/// Returns the block gas limit.
+		fn block_gas_limit() -> U256;
+
 		/// Returns the free balance of the given `[H160]` address, using EVM decimals.
 		fn balance(address: H160) -> U256;
 
diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs
index 9c149c7cc38..e2900bd027b 100644
--- a/substrate/frame/revive/src/primitives.rs
+++ b/substrate/frame/revive/src/primitives.rs
@@ -108,6 +108,14 @@ pub enum EthTransactError {
 	Message(String),
 }
 
+/// Precision used for converting between Native and EVM balances.
+pub enum ConversionPrecision {
+	/// Exact conversion without any rounding.
+	Exact,
+	/// Conversion that rounds up to the nearest whole number.
+	RoundUp,
+}
+
 /// Result type of a `bare_code_upload` call.
 pub type CodeUploadResult<Balance> = Result<CodeUploadReturnValue<Balance>, DispatchError>;
 
-- 
GitLab


From fb2e414f44d4bfa2d47aba4a868fb3f8932f7930 Mon Sep 17 00:00:00 2001
From: Egor_P <egor@parity.io>
Date: Thu, 23 Jan 2025 11:27:39 +0100
Subject: [PATCH 100/116] [Release|CI/CD] Download only linux artefacts for deb
 package build (#7271)

This PR contains a fix for the rc-build release pipeline. The problem
was that, for the deb package build, all the artefacts were downloaded
and merged together, which could lead to the polkadot linux binary
being overwritten with the macos one.
---
 .github/workflows/release-reusable-rc-buid.yml | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml
index 035b547603e..b79f7fa6175 100644
--- a/.github/workflows/release-reusable-rc-buid.yml
+++ b/.github/workflows/release-reusable-rc-buid.yml
@@ -263,9 +263,24 @@ jobs:
         ref: ${{ inputs.release_tag }}
         fetch-depth: 0
 
-    - name: Download artifacts
+    - name: Download polkadot_x86_64-unknown-linux-gnu artifacts
       uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
       with:
+        name: polkadot_x86_64-unknown-linux-gnu
+        path: target/production
+        merge-multiple: true
+
+    - name: Download polkadot-execute-worker_x86_64-unknown-linux-gnu artifacts
+      uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+      with:
+        name: polkadot-execute-worker_x86_64-unknown-linux-gnu
+        path: target/production
+        merge-multiple: true
+
+    - name: Download polkadot-prepare-worker_x86_64-unknown-linux-gnu artifacts
+      uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+      with:
+        name: polkadot-prepare-worker_x86_64-unknown-linux-gnu
         path: target/production
         merge-multiple: true
 
-- 
GitLab


From 04847d515ef56da4d0801c9b89a4241dfa827b33 Mon Sep 17 00:00:00 2001
From: runcomet <runcomet@protonmail.com>
Date: Thu, 23 Jan 2025 02:38:15 -0800
Subject: [PATCH 101/116] Balances: Configurable Number of Genesis Accounts
 with Specified Balances for Benchmarking (#6267)

# Derived Dev Accounts

Resolves https://github.com/paritytech/polkadot-sdk/issues/6040

## Description
This update introduces support for creating an arbitrary number of
developer accounts at the genesis block based on a specified derivation
path. This functionality is gated by the runtime-benchmarks feature,
ensuring it is only enabled during benchmarking scenarios.

### Key Features
- Arbitrary Dev Accounts at Genesis: Developers can now specify any
number of accounts to be generated at genesis using a hard derivation
path.

- Default Derivation Path: If no derivation path is provided (i.e. the
derivation-path field inside `dev_accounts` is `None` while
`dev_accounts` itself is `Some` at genesis), the system will default to
the path `//Sender//{}`.

- No Impact on Total Token Issuance: Developer accounts are excluded
from the total issuance of the token supply at genesis, ensuring they do
not affect the overall balance or token distribution.

polkadot address: 14SRqZTC1d8rfxL8W1tBTnfUBPU23ACFVPzp61FyGf4ftUFg

---------

Co-authored-by: Sebastian Kunert <skunert49@gmail.com>
---
 bridges/modules/messages/src/tests/mock.rs    |   9 +-
 .../pallets/collator-selection/src/mock.rs    |   2 +-
 .../assets/asset-hub-rococo/src/genesis.rs    |   1 +
 .../assets/asset-hub-westend/src/genesis.rs   |   1 +
 .../bridges/bridge-hub-rococo/src/genesis.rs  |   1 +
 .../bridges/bridge-hub-westend/src/genesis.rs |   1 +
 .../collectives-westend/src/genesis.rs        |   1 +
 .../coretime/coretime-rococo/src/genesis.rs   |   1 +
 .../coretime/coretime-westend/src/genesis.rs  |   1 +
 .../people/people-rococo/src/genesis.rs       |   1 +
 .../people/people-westend/src/genesis.rs      |   1 +
 .../parachains/testing/penpal/src/genesis.rs  |   1 +
 .../chains/relays/rococo/src/genesis.rs       |   1 +
 .../chains/relays/westend/src/genesis.rs      |   1 +
 .../parachains/runtimes/test-utils/src/lib.rs |   2 +-
 .../runtime/common/src/assigned_slots/mod.rs  |   1 +
 polkadot/runtime/common/src/auctions/mock.rs  |   1 +
 polkadot/runtime/common/src/crowdloan/mod.rs  |   1 +
 .../common/src/paras_registrar/mock.rs        |   1 +
 polkadot/runtime/common/src/slots/mod.rs      |   1 +
 .../relay_token_transactor/network.rs         |   9 +-
 polkadot/xcm/pallet-xcm/src/mock.rs           |   2 +-
 .../single_asset_adapter/mock.rs              |   1 +
 polkadot/xcm/xcm-builder/tests/mock/mod.rs    |   2 +-
 polkadot/xcm/xcm-runtime-apis/tests/mock.rs   |   4 +-
 polkadot/xcm/xcm-simulator/example/src/lib.rs |   2 +
 polkadot/xcm/xcm-simulator/fuzzer/src/fuzz.rs |   3 +-
 prdoc/pr_6267.prdoc                           | 171 ++++++++++++++++++
 .../cli/tests/res/default_genesis_config.json |   3 +-
 substrate/bin/node/testing/src/genesis.rs     |   2 +-
 .../tests/expected/create_default.json        |   3 +-
 .../tests/expected/create_parachain.json      |   3 +-
 .../tests/expected/create_with_params.json    |   3 +-
 .../tests/expected/doc/create_default.json    |   3 +-
 .../tests/expected/doc/display_preset.json    |   2 +-
 .../chain-spec/src/genesis_config_builder.rs  |   2 +-
 substrate/frame/alliance/src/mock.rs          |   1 +
 .../frame/asset-conversion/ops/src/mock.rs    |   1 +
 substrate/frame/asset-conversion/src/mock.rs  |   1 +
 substrate/frame/asset-rewards/src/mock.rs     |   1 +
 substrate/frame/atomic-swap/src/tests.rs      |   5 +-
 substrate/frame/babe/src/mock.rs              |   2 +-
 substrate/frame/balances/Cargo.toml           |   6 +-
 substrate/frame/balances/src/lib.rs           |  64 ++++++-
 .../balances/src/tests/currency_tests.rs      |  11 +-
 substrate/frame/balances/src/tests/mod.rs     |  40 +++-
 substrate/frame/beefy/src/mock.rs             |   2 +-
 substrate/frame/bounties/src/tests.rs         |  15 +-
 substrate/frame/child-bounties/src/tests.rs   |   1 +
 substrate/frame/collective/src/tests.rs       |   5 +-
 .../frame/contracts/mock-network/src/lib.rs   |   2 +
 substrate/frame/contracts/src/tests.rs        |   2 +-
 .../frame/conviction-voting/src/tests.rs      |   1 +
 substrate/frame/delegated-staking/src/mock.rs |   1 +
 substrate/frame/democracy/src/tests.rs        |   1 +
 .../election-provider-multi-phase/src/mock.rs |   1 +
 .../test-staking-e2e/src/mock.rs              |   1 +
 substrate/frame/elections-phragmen/src/lib.rs |   1 +
 substrate/frame/executive/src/tests.rs        |  20 +-
 substrate/frame/fast-unstake/src/mock.rs      |   1 +
 substrate/frame/grandpa/src/mock.rs           |   2 +-
 substrate/frame/identity/src/tests.rs         |   1 +
 substrate/frame/indices/src/mock.rs           |   1 +
 substrate/frame/lottery/src/mock.rs           |   1 +
 substrate/frame/multisig/src/tests.rs         |   1 +
 substrate/frame/nis/src/mock.rs               |   1 +
 .../test-delegate-stake/src/mock.rs           |   1 +
 substrate/frame/preimage/src/mock.rs          |   1 +
 substrate/frame/proxy/src/tests.rs            |   1 +
 substrate/frame/recovery/src/mock.rs          |   1 +
 substrate/frame/referenda/src/mock.rs         |   2 +-
 .../frame/revive/mock-network/src/lib.rs      |   2 +
 substrate/frame/revive/src/tests.rs           |   2 +-
 substrate/frame/root-offences/src/mock.rs     |   1 +
 substrate/frame/safe-mode/src/mock.rs         |   1 +
 substrate/frame/scored-pool/src/mock.rs       |   2 +-
 substrate/frame/society/src/mock.rs           |   2 +-
 substrate/frame/staking/src/mock.rs           |   1 +
 .../frame/state-trie-migration/src/lib.rs     |   9 +-
 substrate/frame/statement/src/mock.rs         |   1 +
 substrate/frame/tips/src/tests.rs             |   6 +-
 .../asset-conversion-tx-payment/src/tests.rs  |   1 +
 .../asset-tx-payment/src/tests.rs             |   1 +
 .../frame/transaction-payment/src/tests.rs    |   1 +
 .../frame/transaction-storage/src/mock.rs     |   1 +
 substrate/frame/treasury/src/tests.rs         |  11 +-
 substrate/frame/tx-pause/src/mock.rs          |   1 +
 substrate/frame/utility/src/tests.rs          |   1 +
 substrate/frame/vesting/src/mock.rs           |   1 +
 .../test-utils/runtime/src/genesismap.rs      |   5 +-
 substrate/test-utils/runtime/src/lib.rs       |   2 +-
 91 files changed, 434 insertions(+), 62 deletions(-)
 create mode 100644 prdoc/pr_6267.prdoc

diff --git a/bridges/modules/messages/src/tests/mock.rs b/bridges/modules/messages/src/tests/mock.rs
index 2935ebd6961..8eebdf3a508 100644
--- a/bridges/modules/messages/src/tests/mock.rs
+++ b/bridges/modules/messages/src/tests/mock.rs
@@ -461,9 +461,12 @@ pub fn inbound_unrewarded_relayers_state(lane: TestLaneIdType) -> UnrewardedRela
 /// Return test externalities to use in tests.
 pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<TestRuntime>::default().build_storage().unwrap();
-	pallet_balances::GenesisConfig::<TestRuntime> { balances: vec![(ENDOWED_ACCOUNT, 1_000_000)] }
-		.assimilate_storage(&mut t)
-		.unwrap();
+	pallet_balances::GenesisConfig::<TestRuntime> {
+		balances: vec![(ENDOWED_ACCOUNT, 1_000_000)],
+		..Default::default()
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	sp_io::TestExternalities::new(t)
 }
 
diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs
index d13f9e9d8c4..6a97525c4f5 100644
--- a/cumulus/pallets/collator-selection/src/mock.rs
+++ b/cumulus/pallets/collator-selection/src/mock.rs
@@ -188,7 +188,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 		invulnerables,
 	};
 	let session = pallet_session::GenesisConfig::<Test> { keys, ..Default::default() };
-	pallet_balances::GenesisConfig::<Test> { balances }
+	pallet_balances::GenesisConfig::<Test> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 	// collator selection must be initialized before session.
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs
index 3ffb9a704b4..4a10a1e10c7 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs
@@ -42,6 +42,7 @@ pub fn genesis() -> Storage {
 				.cloned()
 				.map(|k| (k, ED * 4096 * 4096))
 				.collect(),
+			..Default::default()
 		},
 		parachain_info: asset_hub_rococo_runtime::ParachainInfoConfig {
 			parachain_id: PARA_ID.into(),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs
index ef7997322da..0473686081e 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs
@@ -39,6 +39,7 @@ pub fn genesis() -> Storage {
 		system: asset_hub_westend_runtime::SystemConfig::default(),
 		balances: asset_hub_westend_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: asset_hub_westend_runtime::ParachainInfoConfig {
 			parachain_id: PARA_ID.into(),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs
index 575017f88bb..62b2e4eed9e 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs
@@ -33,6 +33,7 @@ pub fn genesis() -> Storage {
 		system: bridge_hub_rococo_runtime::SystemConfig::default(),
 		balances: bridge_hub_rococo_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: bridge_hub_rococo_runtime::ParachainInfoConfig {
 			parachain_id: PARA_ID.into(),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs
index eb4623084f8..5286110bcab 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs
@@ -33,6 +33,7 @@ pub fn genesis() -> Storage {
 		system: bridge_hub_westend_runtime::SystemConfig::default(),
 		balances: bridge_hub_westend_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: bridge_hub_westend_runtime::ParachainInfoConfig {
 			parachain_id: PARA_ID.into(),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/genesis.rs
index d4ef184ea39..51e065a4ae5 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend/src/genesis.rs
@@ -30,6 +30,7 @@ pub fn genesis() -> Storage {
 		system: collectives_westend_runtime::SystemConfig::default(),
 		balances: collectives_westend_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: collectives_westend_runtime::ParachainInfoConfig {
 			parachain_id: PARA_ID.into(),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/src/genesis.rs
index e0f035c368e..f2035c8654d 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-rococo/src/genesis.rs
@@ -30,6 +30,7 @@ pub fn genesis() -> Storage {
 		system: coretime_rococo_runtime::SystemConfig::default(),
 		balances: coretime_rococo_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: coretime_rococo_runtime::ParachainInfoConfig {
 			parachain_id: PARA_ID.into(),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/src/genesis.rs
index 239ad3760c1..29894222eff 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/coretime/coretime-westend/src/genesis.rs
@@ -30,6 +30,7 @@ pub fn genesis() -> Storage {
 		system: coretime_westend_runtime::SystemConfig::default(),
 		balances: coretime_westend_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: coretime_westend_runtime::ParachainInfoConfig {
 			parachain_id: PARA_ID.into(),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/genesis.rs
index 36a701d24c2..9772a64d23b 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/genesis.rs
@@ -31,6 +31,7 @@ pub fn genesis() -> Storage {
 		system: people_rococo_runtime::SystemConfig::default(),
 		balances: people_rococo_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: people_rococo_runtime::ParachainInfoConfig {
 			parachain_id: ParaId::from(PARA_ID),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/genesis.rs
index 942ec1b31d2..377babc59f6 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/genesis.rs
@@ -31,6 +31,7 @@ pub fn genesis() -> Storage {
 		system: people_westend_runtime::SystemConfig::default(),
 		balances: people_westend_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: people_westend_runtime::ParachainInfoConfig {
 			parachain_id: ParaId::from(PARA_ID),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs
index 63510d233d2..e514d0cb747 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs
@@ -40,6 +40,7 @@ pub fn genesis(para_id: u32) -> Storage {
 		system: penpal_runtime::SystemConfig::default(),
 		balances: penpal_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(),
+			..Default::default()
 		},
 		parachain_info: penpal_runtime::ParachainInfoConfig {
 			parachain_id: para_id.into(),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs
index 3d8b5b1a500..db9fe19dbdd 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs
@@ -57,6 +57,7 @@ pub fn genesis() -> Storage {
 		system: rococo_runtime::SystemConfig::default(),
 		balances: rococo_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().map(|k| (k.clone(), ENDOWMENT)).collect(),
+			..Default::default()
 		},
 		session: rococo_runtime::SessionConfig {
 			keys: validators::initial_authorities()
diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs
index f8d43cf4648..2f02ca5f193 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs
@@ -58,6 +58,7 @@ pub fn genesis() -> Storage {
 		system: westend_runtime::SystemConfig::default(),
 		balances: westend_runtime::BalancesConfig {
 			balances: accounts::init_balances().iter().cloned().map(|k| (k, ENDOWMENT)).collect(),
+			..Default::default()
 		},
 		session: westend_runtime::SessionConfig {
 			keys: validators::initial_authorities()
diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs
index 5c33809ba67..b46a68312aa 100644
--- a/cumulus/parachains/runtimes/test-utils/src/lib.rs
+++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs
@@ -230,7 +230,7 @@ impl<Runtime: BasicParachainRuntime> ExtBuilder<Runtime> {
 			.unwrap();
 		}
 
-		pallet_balances::GenesisConfig::<Runtime> { balances: self.balances }
+		pallet_balances::GenesisConfig::<Runtime> { balances: self.balances, ..Default::default() }
 			.assimilate_storage(&mut t)
 			.unwrap();
 
diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs
index dea29f53cad..81e2986ab6b 100644
--- a/polkadot/runtime/common/src/assigned_slots/mod.rs
+++ b/polkadot/runtime/common/src/assigned_slots/mod.rs
@@ -773,6 +773,7 @@ mod tests {
 		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 		pallet_balances::GenesisConfig::<Test> {
 			balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
+			..Default::default()
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
diff --git a/polkadot/runtime/common/src/auctions/mock.rs b/polkadot/runtime/common/src/auctions/mock.rs
index e0365d363ca..191608f8c87 100644
--- a/polkadot/runtime/common/src/auctions/mock.rs
+++ b/polkadot/runtime/common/src/auctions/mock.rs
@@ -210,6 +210,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs
index f8b3169407e..1b40f248bfb 100644
--- a/polkadot/runtime/common/src/crowdloan/mod.rs
+++ b/polkadot/runtime/common/src/crowdloan/mod.rs
@@ -1082,6 +1082,7 @@ mod tests {
 		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 		pallet_balances::GenesisConfig::<Test> {
 			balances: vec![(1, 1000), (2, 2000), (3, 3000), (4, 4000)],
+			..Default::default()
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
diff --git a/polkadot/runtime/common/src/paras_registrar/mock.rs b/polkadot/runtime/common/src/paras_registrar/mock.rs
index 07b8fbca518..bb3728f0e12 100644
--- a/polkadot/runtime/common/src/paras_registrar/mock.rs
+++ b/polkadot/runtime/common/src/paras_registrar/mock.rs
@@ -166,6 +166,7 @@ pub fn new_test_ext() -> TestExternalities {
 
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10_000_000), (2, 10_000_000), (3, 10_000_000)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs
index 59a1f1870b2..131a75f3d74 100644
--- a/polkadot/runtime/common/src/slots/mod.rs
+++ b/polkadot/runtime/common/src/slots/mod.rs
@@ -578,6 +578,7 @@ mod tests {
 		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 		pallet_balances::GenesisConfig::<Test> {
 			balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
+			..Default::default()
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs
index 46ac0e5df63..71c14f6b241 100644
--- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs
+++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/network.rs
@@ -78,9 +78,12 @@ pub fn relay_ext() -> TestExternalities {
 
 	let mut t = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
 
-	pallet_balances::GenesisConfig::<Runtime> { balances: vec![(ALICE, INITIAL_BALANCE)] }
-		.assimilate_storage(&mut t)
-		.unwrap();
+	pallet_balances::GenesisConfig::<Runtime> {
+		balances: vec![(ALICE, INITIAL_BALANCE)],
+		..Default::default()
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 
 	let mut ext = TestExternalities::new(t);
 	ext.execute_with(|| {
diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs
index 8d0476b0e70..58b4226ccf1 100644
--- a/polkadot/xcm/pallet-xcm/src/mock.rs
+++ b/polkadot/xcm/pallet-xcm/src/mock.rs
@@ -700,7 +700,7 @@ pub(crate) fn new_test_ext_with_balances_and_xcm_version(
 ) -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 
-	pallet_balances::GenesisConfig::<Test> { balances }
+	pallet_balances::GenesisConfig::<Test> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 
diff --git a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs
index e6fe8e45c26..55a924dbaa6 100644
--- a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs
+++ b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs
@@ -339,6 +339,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 
 	pallet_balances::GenesisConfig::<Runtime> {
 		balances: vec![(0, INITIAL_BALANCE), (1, INITIAL_BALANCE), (2, INITIAL_BALANCE)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs
index 0468b0a5410..c3e53284508 100644
--- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs
+++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs
@@ -243,7 +243,7 @@ construct_runtime!(
 pub fn kusama_like_with_balances(balances: Vec<(AccountId, Balance)>) -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
 
-	pallet_balances::GenesisConfig::<Runtime> { balances }
+	pallet_balances::GenesisConfig::<Runtime> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 
diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
index 56a77094f17..18d9dce9245 100644
--- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
+++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs
@@ -365,7 +365,7 @@ impl pallet_xcm::Config for TestRuntime {
 pub fn new_test_ext_with_balances(balances: Vec<(AccountId, Balance)>) -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<TestRuntime>::default().build_storage().unwrap();
 
-	pallet_balances::GenesisConfig::<TestRuntime> { balances }
+	pallet_balances::GenesisConfig::<TestRuntime> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 
@@ -381,7 +381,7 @@ pub fn new_test_ext_with_balances_and_assets(
 ) -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<TestRuntime>::default().build_storage().unwrap();
 
-	pallet_balances::GenesisConfig::<TestRuntime> { balances }
+	pallet_balances::GenesisConfig::<TestRuntime> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 
diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs
index 6fb9a69770e..8a05569831b 100644
--- a/polkadot/xcm/xcm-simulator/example/src/lib.rs
+++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs
@@ -101,6 +101,7 @@ pub fn para_ext(para_id: u32) -> sp_io::TestExternalities {
 
 	pallet_balances::GenesisConfig::<Runtime> {
 		balances: vec![(ALICE, INITIAL_BALANCE), (parent_account_id(), INITIAL_BALANCE)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
@@ -125,6 +126,7 @@ pub fn relay_ext() -> sp_io::TestExternalities {
 			(child_account_id(1), INITIAL_BALANCE),
 			(child_account_id(2), INITIAL_BALANCE),
 		],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/fuzz.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/fuzz.rs
index adf6cacd278..8ea5e033f3a 100644
--- a/polkadot/xcm/xcm-simulator/fuzzer/src/fuzz.rs
+++ b/polkadot/xcm/xcm-simulator/fuzzer/src/fuzz.rs
@@ -117,6 +117,7 @@ pub fn para_ext(para_id: u32) -> sp_io::TestExternalities {
 
 	pallet_balances::GenesisConfig::<Runtime> {
 		balances: (0..6).map(|i| ([i; 32].into(), INITIAL_BALANCE)).collect(),
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
@@ -138,7 +139,7 @@ pub fn relay_ext() -> sp_io::TestExternalities {
 	balances.append(&mut (1..=3).map(|i| (para_account_id(i), INITIAL_BALANCE)).collect());
 	balances.append(&mut (0..6).map(|i| ([i; 32].into(), INITIAL_BALANCE)).collect());
 
-	pallet_balances::GenesisConfig::<Runtime> { balances }
+	pallet_balances::GenesisConfig::<Runtime> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 
diff --git a/prdoc/pr_6267.prdoc b/prdoc/pr_6267.prdoc
new file mode 100644
index 00000000000..30ada445625
--- /dev/null
+++ b/prdoc/pr_6267.prdoc
@@ -0,0 +1,171 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Allow configurable number of genesis accounts with specified balances for benchmarking.
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This pull request adds an additional field `dev_accounts` to the `GenesisConfig`
+      of the balances pallet, feature gated by `runtime-benchmarks`.
+
+      This brings about an arbitrary number of derived dev accounts when building the genesis
+      state. Runtime developers should supply a derivation path that includes an index placeholder
+      (i.e. "//Sender/{}") to generate multiple accounts from the same root in a consistent
+      manner.
+
+crates:
+  - name: substrate-test-runtime
+    bump: minor
+  - name: pallet-vesting
+    bump: patch
+  - name: pallet-utility
+    bump: patch
+  - name: pallet-tx-pause
+    bump: patch
+  - name: pallet-treasury
+    bump: patch
+  - name: pallet-transaction-storage
+    bump: patch
+  - name: pallet-transaction-payment
+    bump: patch
+  - name: pallet-asset-tx-payment
+    bump: patch
+  - name: pallet-asset-conversion-tx-payment
+    bump: patch
+  - name: pallet-tips
+    bump: patch
+  - name: pallet-state-trie-migration
+    bump: patch
+  - name: pallet-staking
+    bump: patch
+  - name: pallet-society
+    bump: patch
+  - name: pallet-safe-mode
+    bump: patch
+  - name: pallet-scored-pool
+    bump: patch
+  - name: pallet-statement
+    bump: patch
+  - name: pallet-root-offences
+    bump: patch
+  - name: pallet-revive
+    bump: patch
+  - name: pallet-revive-mock-network
+    bump: patch
+  - name: pallet-referenda
+    bump: patch
+  - name: pallet-recovery
+    bump: patch
+  - name: pallet-proxy
+    bump: patch
+  - name: pallet-preimage
+    bump: patch
+  - name: pallet-nis
+    bump: patch
+  - name: pallet-nomination-pools-test-delegate-stake
+    bump: minor
+  - name: pallet-multisig
+    bump: patch
+  - name: pallet-lottery
+    bump: patch
+  - name: pallet-indices
+    bump: patch
+  - name: pallet-identity
+    bump: patch
+  - name: pallet-grandpa
+    bump: patch
+  - name: pallet-fast-unstake
+    bump: patch
+  - name: frame-executive
+    bump: patch
+  - name: pallet-elections-phragmen
+    bump: patch
+  - name: pallet-election-provider-e2e-test
+    bump: minor
+  - name: pallet-election-provider-multi-phase
+    bump: patch
+  - name: pallet-democracy
+    bump: patch
+  - name: pallet-delegated-staking
+    bump: patch
+  - name: pallet-conviction-voting
+    bump: patch
+  - name: pallet-contracts
+    bump: patch
+  - name: pallet-contracts-mock-network
+    bump: patch
+  - name: pallet-collective
+    bump: patch
+  - name: pallet-child-bounties
+    bump: patch
+  - name: pallet-bounties
+    bump: patch
+  - name: pallet-beefy
+    bump: patch
+  - name: pallet-balances
+    bump: major
+  - name: pallet-babe
+    bump: patch
+  - name: pallet-asset-conversion
+    bump: patch
+  - name: pallet-asset-conversion-ops
+    bump: patch
+  - name: pallet-asset-rewards
+    bump: patch
+  - name: pallet-atomic-swap
+    bump: patch
+  - name: pallet-alliance
+    bump: patch
+  - name: node-testing
+    bump: minor
+  - name: sc-chain-spec
+    bump: patch
+  - name: staging-chain-spec-builder
+    bump: patch
+  - name: xcm-simulator-fuzzer
+    bump: minor
+  - name: xcm-simulator-fuzzer
+    bump: minor
+  - name: xcm-simulator-example
+    bump: patch
+  - name: xcm-runtime-apis
+    bump: patch
+  - name: staging-xcm-builder
+    bump: patch
+  - name: pallet-xcm
+    bump: patch
+  - name: xcm-docs
+    bump: minor
+  - name: polkadot-runtime-common
+    bump: patch
+  - name: parachains-runtimes-test-utils
+    bump: patch
+  - name: westend-emulated-chain
+    bump: minor
+  - name: rococo-emulated-chain
+    bump: minor
+  - name: penpal-emulated-chain
+    bump: minor
+  - name: people-westend-emulated-chain
+    bump: minor
+  - name: people-rococo-emulated-chain
+    bump: minor
+  - name: coretime-westend-emulated-chain
+    bump: minor
+  - name: coretime-rococo-emulated-chain
+    bump: minor
+  - name: collectives-westend-emulated-chain
+    bump: minor
+  - name: bridge-hub-westend-emulated-chain
+    bump: minor
+  - name: bridge-hub-rococo-emulated-chain
+    bump: minor
+  - name: asset-hub-westend-emulated-chain
+    bump: minor
+  - name: asset-hub-rococo-emulated-chain
+    bump: minor
+  - name: pallet-collator-selection
+    bump: patch
+  - name: pallet-bridge-messages
+    bump: patch
diff --git a/substrate/bin/node/cli/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json
index a2e52837d88..8ad2428f785 100644
--- a/substrate/bin/node/cli/tests/res/default_genesis_config.json
+++ b/substrate/bin/node/cli/tests/res/default_genesis_config.json
@@ -14,7 +14,8 @@
     "indices": []
   },
   "balances": {
-    "balances": []
+    "balances": [],
+    "devAccounts": null
   },
   "broker": {},
   "transactionPayment": {
diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs
index 0394f6cd739..624b00b4d6c 100644
--- a/substrate/bin/node/testing/src/genesis.rs
+++ b/substrate/bin/node/testing/src/genesis.rs
@@ -47,7 +47,7 @@ pub fn config_endowed(extra_endowed: Vec<AccountId>) -> RuntimeGenesisConfig {
 
 	RuntimeGenesisConfig {
 		indices: IndicesConfig { indices: vec![] },
-		balances: BalancesConfig { balances: endowed },
+		balances: BalancesConfig { balances: endowed, ..Default::default() },
 		session: SessionConfig {
 			keys: vec![
 				(alice(), dave(), session_keys_from_seed(Ed25519Keyring::Alice.into())),
diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/create_default.json b/substrate/bin/utils/chain-spec-builder/tests/expected/create_default.json
index ac67aef9334..77891ac93ea 100644
--- a/substrate/bin/utils/chain-spec-builder/tests/expected/create_default.json
+++ b/substrate/bin/utils/chain-spec-builder/tests/expected/create_default.json
@@ -25,7 +25,8 @@
           }
         },
         "balances": {
-          "balances": []
+          "balances": [],
+          "devAccounts": null
         },
         "substrateTest": {
           "authorities": []
diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/create_parachain.json b/substrate/bin/utils/chain-spec-builder/tests/expected/create_parachain.json
index 7106b4b50dc..22b0ca6571b 100644
--- a/substrate/bin/utils/chain-spec-builder/tests/expected/create_parachain.json
+++ b/substrate/bin/utils/chain-spec-builder/tests/expected/create_parachain.json
@@ -27,7 +27,8 @@
           }
         },
         "balances": {
-          "balances": []
+          "balances": [],
+          "devAccounts": null
         },
         "substrateTest": {
           "authorities": []
diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/create_with_params.json b/substrate/bin/utils/chain-spec-builder/tests/expected/create_with_params.json
index 5aedd5b5c18..641df669e18 100644
--- a/substrate/bin/utils/chain-spec-builder/tests/expected/create_with_params.json
+++ b/substrate/bin/utils/chain-spec-builder/tests/expected/create_with_params.json
@@ -25,7 +25,8 @@
           }
         },
         "balances": {
-          "balances": []
+          "balances": [],
+          "devAccounts": null
         },
         "substrateTest": {
           "authorities": []
diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_default.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_default.json
index 203b6716cb2..e5957624ead 100644
--- a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_default.json
+++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/create_default.json
@@ -24,7 +24,8 @@
           }
         },
         "balances": {
-          "balances": []
+          "balances": [],
+          "devAccounts": null
         },
         "substrateTest": {
           "authorities": []
diff --git a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset.json b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset.json
index 6aa6799af77..6bbb475d35c 100644
--- a/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset.json
+++ b/substrate/bin/utils/chain-spec-builder/tests/expected/doc/display_preset.json
@@ -1 +1 @@
-{"babe":{"authorities":[],"epochConfig":{"allowed_slots":"PrimaryAndSecondaryVRFSlots","c":[1,4]}},"balances":{"balances":[]},"substrateTest":{"authorities":[]},"system":{}}
+{"babe":{"authorities":[],"epochConfig":{"allowed_slots":"PrimaryAndSecondaryVRFSlots","c":[1,4]}},"balances":{"balances":[], "devAccounts": null},"substrateTest":{"authorities":[]},"system":{}}
diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs
index 5fe8f9dc053..c7b5ae4bf16 100644
--- a/substrate/client/chain-spec/src/genesis_config_builder.rs
+++ b/substrate/client/chain-spec/src/genesis_config_builder.rs
@@ -196,7 +196,7 @@ mod tests {
 			<GenesisConfigBuilderRuntimeCaller>::new(substrate_test_runtime::wasm_binary_unwrap())
 				.get_default_config()
 				.unwrap();
-		let expected = r#"{"babe": {"authorities": [], "epochConfig": {"allowed_slots": "PrimaryAndSecondaryVRFSlots", "c": [1, 4]}}, "balances": {"balances": []}, "substrateTest": {"authorities": []}, "system": {}}"#;
+		let expected = r#"{"babe": {"authorities": [], "epochConfig": {"allowed_slots": "PrimaryAndSecondaryVRFSlots", "c": [1, 4]}}, "balances": {"balances": [], "devAccounts": null}, "substrateTest": {"authorities": []}, "system": {}}"#;
 		assert_eq!(from_str::<Value>(expected).unwrap(), config);
 	}
 
diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs
index 625cabf3457..069c29a88d3 100644
--- a/substrate/frame/alliance/src/mock.rs
+++ b/substrate/frame/alliance/src/mock.rs
@@ -283,6 +283,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 			(8, 1000),
 			(9, 1000),
 		],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/asset-conversion/ops/src/mock.rs b/substrate/frame/asset-conversion/ops/src/mock.rs
index 5c05faa6aa8..576b266b39c 100644
--- a/substrate/frame/asset-conversion/ops/src/mock.rs
+++ b/substrate/frame/asset-conversion/ops/src/mock.rs
@@ -135,6 +135,7 @@ pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
 
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10000), (2, 20000), (3, 30000), (4, 40000)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs
index d8832d70488..313d9f9857e 100644
--- a/substrate/frame/asset-conversion/src/mock.rs
+++ b/substrate/frame/asset-conversion/src/mock.rs
@@ -162,6 +162,7 @@ pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
 
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10000), (2, 20000), (3, 30000), (4, 40000)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/asset-rewards/src/mock.rs b/substrate/frame/asset-rewards/src/mock.rs
index 87c8a8a0dea..1e9b41104d4 100644
--- a/substrate/frame/asset-rewards/src/mock.rs
+++ b/substrate/frame/asset-rewards/src/mock.rs
@@ -211,6 +211,7 @@ pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
 			(20, 40000),
 			(pool_zero_account_id, 100_000), // Top up the default pool account id
 		],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs
index 6fcc5571a52..d6384fab343 100644
--- a/substrate/frame/atomic-swap/src/tests.rs
+++ b/substrate/frame/atomic-swap/src/tests.rs
@@ -54,7 +54,10 @@ const B: u64 = 2;
 
 pub fn new_test_ext() -> TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	let genesis = pallet_balances::GenesisConfig::<Test> { balances: vec![(A, 100), (B, 200)] };
+	let genesis = pallet_balances::GenesisConfig::<Test> {
+		balances: vec![(A, 100), (B, 200)],
+		..Default::default()
+	};
 	genesis.assimilate_storage(&mut t).unwrap();
 	t.into()
 }
diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs
index 8d00509e800..6f9f54cc7ef 100644
--- a/substrate/frame/babe/src/mock.rs
+++ b/substrate/frame/babe/src/mock.rs
@@ -314,7 +314,7 @@ pub fn new_test_ext_raw_authorities(authorities: Vec<AuthorityId>) -> sp_io::Tes
 
 	let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect();
 
-	pallet_balances::GenesisConfig::<Test> { balances }
+	pallet_balances::GenesisConfig::<Test> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 
diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml
index 03bc7fcb3fc..4255ed41436 100644
--- a/substrate/frame/balances/Cargo.toml
+++ b/substrate/frame/balances/Cargo.toml
@@ -23,13 +23,15 @@ frame-support = { workspace = true }
 frame-system = { workspace = true }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
+sp-core = { workspace = true }
 sp-runtime = { workspace = true }
 
 [dev-dependencies]
-frame-support = { features = ["experimental"], workspace = true, default-features = true }
+frame-support = { features = [
+	"experimental",
+], workspace = true, default-features = true }
 pallet-transaction-payment = { workspace = true, default-features = true }
 paste = { workspace = true, default-features = true }
-sp-core = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
 
 [features]
diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs
index 9d740145210..e994f05a77c 100644
--- a/substrate/frame/balances/src/lib.rs
+++ b/substrate/frame/balances/src/lib.rs
@@ -152,7 +152,11 @@ pub mod weights;
 
 extern crate alloc;
 
-use alloc::vec::Vec;
+use alloc::{
+	format,
+	string::{String, ToString},
+	vec::Vec,
+};
 use codec::{Codec, MaxEncodedLen};
 use core::{cmp, fmt::Debug, mem, result};
 use frame_support::{
@@ -173,6 +177,7 @@ use frame_support::{
 use frame_system as system;
 pub use impl_currency::{NegativeImbalance, PositiveImbalance};
 use scale_info::TypeInfo;
+use sp_core::{sr25519::Pair as SrPair, Pair};
 use sp_runtime::{
 	traits::{
 		AtLeast32BitUnsigned, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Saturating,
@@ -180,6 +185,7 @@ use sp_runtime::{
 	},
 	ArithmeticError, DispatchError, FixedPointOperand, Perbill, RuntimeDebug, TokenError,
 };
+
 pub use types::{
 	AccountData, AdjustmentDirection, BalanceLock, DustCleaner, ExtraFlags, Reasons, ReserveData,
 };
@@ -189,6 +195,9 @@ pub use pallet::*;
 
 const LOG_TARGET: &str = "runtime::balances";
 
+// Default (hard) derivation path for development accounts.
+const DEFAULT_ADDRESS_URI: &str = "//Sender//{}";
+
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 
 #[frame_support::pallet]
@@ -505,11 +514,18 @@ pub mod pallet {
 	#[pallet::genesis_config]
 	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
 		pub balances: Vec<(T::AccountId, T::Balance)>,
+		/// Derived development accounts (optional):
+		/// - `u32`: The number of development accounts to generate.
+		/// - `T::Balance`: The initial balance assigned to each development account.
+		/// - `String`: An optional (hard) derivation string template.
+		/// - Must include `{}` as a placeholder for account indices.
+		/// - Defaults to `"//Sender//{}"` if `None`.
+		pub dev_accounts: Option<(u32, T::Balance, Option<String>)>,
 	}
 
 	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
 		fn default() -> Self {
-			Self { balances: Default::default() }
+			Self { balances: Default::default(), dev_accounts: None }
 		}
 	}
 
@@ -540,6 +556,15 @@ pub mod pallet {
 				"duplicate balances in genesis."
 			);
 
+			// Generate additional dev accounts.
+			if let Some((num_accounts, balance, ref derivation)) = self.dev_accounts {
+				// Use the provided derivation string, or default to `"//Sender//{}"`.
+				Pallet::<T, I>::derive_dev_account(
+					num_accounts,
+					balance,
+					derivation.as_deref().unwrap_or(DEFAULT_ADDRESS_URI),
+				);
+			}
 			for &(ref who, free) in self.balances.iter() {
 				frame_system::Pallet::<T>::inc_providers(who);
 				assert!(T::AccountStore::insert(who, AccountData { free, ..Default::default() })
@@ -1248,5 +1273,40 @@ pub mod pallet {
 			});
 			Ok(actual)
 		}
+
+		/// Generate dev accounts from a (hard) derivation string.
+		pub fn derive_dev_account(num_accounts: u32, balance: T::Balance, derivation: &str) {
+			// Ensure that the number of accounts is not zero.
+			assert!(num_accounts > 0, "num_accounts must be greater than zero");
+
+			assert!(
+				balance >= <T as Config<I>>::ExistentialDeposit::get(),
+				"the balance of any account should always be at least the existential deposit.",
+			);
+
+			assert!(
+				derivation.contains("{}"),
+				"Invalid derivation, expected `{{}}` as part of the derivation"
+			);
+
+			for index in 0..num_accounts {
+				// Replace "{}" in the derivation string with the index.
+				let derivation_string = derivation.replace("{}", &index.to_string());
+
+				// Generate the key pair from the derivation string using sr25519.
+				let pair: SrPair = Pair::from_string(&derivation_string, None)
+					.expect(&format!("Failed to parse derivation string: {derivation_string}"));
+
+				// Convert the public key to AccountId.
+				let who = T::AccountId::decode(&mut &pair.public().encode()[..])
+					.expect(&format!("Failed to decode public key from pair: {:?}", pair.public()));
+
+				// Set the balance for the generated account.
+				Self::mutate_account_handling_dust(&who, |account| {
+					account.free = balance;
+				})
+				.expect(&format!("Failed to add account to keystore: {:?}", who));
+			}
+		}
 	}
 }
diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs
index 5ad818e5bfa..a6377c3ad72 100644
--- a/substrate/frame/balances/src/tests/currency_tests.rs
+++ b/substrate/frame/balances/src/tests/currency_tests.rs
@@ -721,7 +721,7 @@ fn burn_must_work() {
 fn cannot_set_genesis_value_below_ed() {
 	EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = 11);
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	let _ = crate::GenesisConfig::<Test> { balances: vec![(1, 10)] }
+	let _ = crate::GenesisConfig::<Test> { balances: vec![(1, 10)], ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 }
@@ -730,9 +730,12 @@ fn cannot_set_genesis_value_below_ed() {
 #[should_panic = "duplicate balances in genesis."]
 fn cannot_set_genesis_value_twice() {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	let _ = crate::GenesisConfig::<Test> { balances: vec![(1, 10), (2, 20), (1, 15)] }
-		.assimilate_storage(&mut t)
-		.unwrap();
+	let _ = crate::GenesisConfig::<Test> {
+		balances: vec![(1, 10), (2, 20), (1, 15)],
+		..Default::default()
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 }
 
 #[test]
diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs
index bf49ad9f0a1..ceb8e8134f0 100644
--- a/substrate/frame/balances/src/tests/mod.rs
+++ b/substrate/frame/balances/src/tests/mod.rs
@@ -19,7 +19,10 @@
 
 #![cfg(test)]
 
-use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet, TotalIssuance};
+use crate::{
+	self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet, TotalIssuance,
+	DEFAULT_ADDRESS_URI,
+};
 use codec::{Decode, Encode, MaxEncodedLen};
 use frame_support::{
 	assert_err, assert_noop, assert_ok, assert_storage_noop, derive_impl,
@@ -34,7 +37,7 @@ use frame_support::{
 use frame_system::{self as system, RawOrigin};
 use pallet_transaction_payment::{ChargeTransactionPayment, FungibleAdapter, Multiplier};
 use scale_info::TypeInfo;
-use sp_core::hexdisplay::HexDisplay;
+use sp_core::{hexdisplay::HexDisplay, sr25519::Pair as SrPair, Pair};
 use sp_io;
 use sp_runtime::{
 	traits::{BadOrigin, Zero},
@@ -169,6 +172,11 @@ impl ExtBuilder {
 			} else {
 				vec![]
 			},
+			dev_accounts: Some((
+				1000,
+				self.existential_deposit,
+				Some(DEFAULT_ADDRESS_URI.to_string()),
+			)),
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
@@ -281,7 +289,32 @@ pub fn info_from_weight(w: Weight) -> DispatchInfo {
 pub fn ensure_ti_valid() {
 	let mut sum = 0;
 
+	// Fetch the dev accounts from Account Storage.
+	let dev_accounts = (1000, EXISTENTIAL_DEPOSIT, DEFAULT_ADDRESS_URI.to_string());
+	let (num_accounts, _balance, ref derivation) = dev_accounts;
+
+	// Generate the dev account public keys.
+	let dev_account_ids: Vec<_> = (0..num_accounts)
+		.map(|index| {
+			let derivation_string = derivation.replace("{}", &index.to_string());
+			let pair: SrPair =
+				Pair::from_string(&derivation_string, None).expect("Invalid derivation string");
+			<crate::tests::Test as frame_system::Config>::AccountId::decode(
+				&mut &pair.public().encode()[..],
+			)
+			.unwrap()
+		})
+		.collect();
+
+	// Iterate over all account keys (i.e., the account IDs).
 	for acc in frame_system::Account::<Test>::iter_keys() {
+		// Skip dev accounts by checking if the account is in the dev_account_ids list.
+		// This also proves dev_accounts exists in storage.
+		if dev_account_ids.contains(&acc) {
+			continue;
+		}
+
+		// Check if we are using the system pallet or some other custom storage for accounts.
 		if UseSystem::get() {
 			let data = frame_system::Pallet::<Test>::account(acc);
 			sum += data.data.total();
@@ -291,7 +324,8 @@ pub fn ensure_ti_valid() {
 		}
 	}
 
-	assert_eq!(TotalIssuance::<Test>::get(), sum, "Total Issuance wrong");
+	// Ensure the total issuance matches the sum of the account balances
+	assert_eq!(TotalIssuance::<Test>::get(), sum, "Total Issuance is incorrect");
 }
 
 #[test]
diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs
index 4b5f1d103b5..fc731e3bc50 100644
--- a/substrate/frame/beefy/src/mock.rs
+++ b/substrate/frame/beefy/src/mock.rs
@@ -282,7 +282,7 @@ impl ExtBuilder {
 		let balances: Vec<_> =
 			(0..self.authorities.len()).map(|i| (i as u64, 10_000_000)).collect();
 
-		pallet_balances::GenesisConfig::<Test> { balances }
+		pallet_balances::GenesisConfig::<Test> { balances, ..Default::default() }
 			.assimilate_storage(&mut t)
 			.unwrap();
 
diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs
index 447d0edb412..c9f6c1319ed 100644
--- a/substrate/frame/bounties/src/tests.rs
+++ b/substrate/frame/bounties/src/tests.rs
@@ -187,7 +187,10 @@ impl ExtBuilder {
 	pub fn build(self) -> sp_io::TestExternalities {
 		let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig {
 			system: frame_system::GenesisConfig::default(),
-			balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] },
+			balances: pallet_balances::GenesisConfig {
+				balances: vec![(0, 100), (1, 98), (2, 1)],
+				..Default::default()
+			},
 			treasury: Default::default(),
 			treasury_1: Default::default(),
 		}
@@ -338,9 +341,12 @@ fn treasury_account_doesnt_get_deleted() {
 #[allow(deprecated)]
 fn inexistent_account_works() {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	pallet_balances::GenesisConfig::<Test> { balances: vec![(0, 100), (1, 99), (2, 1)] }
-		.assimilate_storage(&mut t)
-		.unwrap();
+	pallet_balances::GenesisConfig::<Test> {
+		balances: vec![(0, 100), (1, 99), (2, 1)],
+		..Default::default()
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	// Treasury genesis config is not build thus treasury account does not exist
 	let mut t: sp_io::TestExternalities = t.into();
 
@@ -977,6 +983,7 @@ fn genesis_funding_works() {
 	pallet_balances::GenesisConfig::<Test> {
 		// Total issuance will be 200 with treasury account initialized with 100.
 		balances: vec![(0, 100), (Treasury::account_id(), initial_funding)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs
index 939983054f6..50c8adb453e 100644
--- a/substrate/frame/child-bounties/src/tests.rs
+++ b/substrate/frame/child-bounties/src/tests.rs
@@ -148,6 +148,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	pallet_balances::GenesisConfig::<Test> {
 		// Total issuance will be 200 with treasury account initialized at ED.
 		balances: vec![(account_id(0), 100), (account_id(1), 98), (account_id(2), 1)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs
index c4ed17821ae..300d5ad3772 100644
--- a/substrate/frame/collective/src/tests.rs
+++ b/substrate/frame/collective/src/tests.rs
@@ -203,7 +203,10 @@ impl ExtBuilder {
 		let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig {
 			system: frame_system::GenesisConfig::default(),
 			// balances: pallet_balances::GenesisConfig::default(),
-			balances: pallet_balances::GenesisConfig { balances: vec![(1, 100), (2, 200)] },
+			balances: pallet_balances::GenesisConfig {
+				balances: vec![(1, 100), (2, 200)],
+				..Default::default()
+			},
 			collective: pallet_collective::GenesisConfig {
 				members: self.collective_members,
 				phantom: Default::default(),
diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs
index cb9e22439b7..c918cd39ed9 100644
--- a/substrate/frame/contracts/mock-network/src/lib.rs
+++ b/substrate/frame/contracts/mock-network/src/lib.rs
@@ -99,6 +99,7 @@ pub fn para_ext(para_id: u32) -> sp_io::TestExternalities {
 			(relay_sovereign_account_id(), INITIAL_BALANCE),
 			(BOB, INITIAL_BALANCE),
 		],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
@@ -137,6 +138,7 @@ pub fn relay_ext() -> sp_io::TestExternalities {
 			(parachain_sovereign_account_id(1), INITIAL_BALANCE),
 			(parachain_account_sovereign_account_id(1, ALICE), INITIAL_BALANCE),
 		],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs
index b01d0aa4fa4..9bba55f82b4 100644
--- a/substrate/frame/contracts/src/tests.rs
+++ b/substrate/frame/contracts/src/tests.rs
@@ -553,7 +553,7 @@ impl ExtBuilder {
 		sp_tracing::try_init_simple();
 		self.set_associated_consts();
 		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-		pallet_balances::GenesisConfig::<Test> { balances: vec![] }
+		pallet_balances::GenesisConfig::<Test> { balances: vec![], ..Default::default() }
 			.assimilate_storage(&mut t)
 			.unwrap();
 		let mut ext = sp_io::TestExternalities::new(t);
diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs
index dd9ee33ee18..b1b1fab5ae5 100644
--- a/substrate/frame/conviction-voting/src/tests.rs
+++ b/substrate/frame/conviction-voting/src/tests.rs
@@ -160,6 +160,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs
index 875279864f7..a4546e57dab 100644
--- a/substrate/frame/delegated-staking/src/mock.rs
+++ b/substrate/frame/delegated-staking/src/mock.rs
@@ -189,6 +189,7 @@ impl ExtBuilder {
 				(GENESIS_NOMINATOR_ONE, 1000),
 				(GENESIS_NOMINATOR_TWO, 2000),
 			],
+			..Default::default()
 		}
 		.assimilate_storage(&mut storage);
 
diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs
index 10e5ee75611..77774480068 100644
--- a/substrate/frame/democracy/src/tests.rs
+++ b/substrate/frame/democracy/src/tests.rs
@@ -169,6 +169,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs
index 2e5ac252720..d0797e100fc 100644
--- a/substrate/frame/election-provider-multi-phase/src/mock.rs
+++ b/substrate/frame/election-provider-multi-phase/src/mock.rs
@@ -600,6 +600,7 @@ impl ExtBuilder {
 				(999, 100),
 				(9999, 100),
 			],
+			..Default::default()
 		}
 		.assimilate_storage(&mut storage);
 
diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
index bcb25f8287b..3a649643618 100644
--- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
+++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs
@@ -567,6 +567,7 @@ impl ExtBuilder {
 
 		let _ = pallet_balances::GenesisConfig::<Runtime> {
 			balances: self.balances_builder.balances.clone(),
+			..Default::default()
 		}
 		.assimilate_storage(&mut storage);
 
diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs
index fa1c48ee65e..4a40d44e407 100644
--- a/substrate/frame/elections-phragmen/src/lib.rs
+++ b/substrate/frame/elections-phragmen/src/lib.rs
@@ -1476,6 +1476,7 @@ mod tests {
 						(5, 50 * self.balance_factor),
 						(6, 60 * self.balance_factor),
 					],
+					..Default::default()
 				},
 				elections: elections_phragmen::GenesisConfig::<Test> {
 					members: self.genesis_members,
diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs
index 882d875f3d8..dd12a85a111 100644
--- a/substrate/frame/executive/src/tests.rs
+++ b/substrate/frame/executive/src/tests.rs
@@ -576,7 +576,7 @@ fn call_transfer(dest: u64, value: u64) -> RuntimeCall {
 #[test]
 fn balance_transfer_dispatch_works() {
 	let mut t = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
-	pallet_balances::GenesisConfig::<Runtime> { balances: vec![(1, 211)] }
+	pallet_balances::GenesisConfig::<Runtime> { balances: vec![(1, 211)], ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 	let xt = UncheckedXt::new_signed(call_transfer(2, 69), 1, 1.into(), tx_ext(0, 0));
@@ -598,9 +598,12 @@ fn balance_transfer_dispatch_works() {
 
 fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
-	pallet_balances::GenesisConfig::<Runtime> { balances: vec![(1, 111 * balance_factor)] }
-		.assimilate_storage(&mut t)
-		.unwrap();
+	pallet_balances::GenesisConfig::<Runtime> {
+		balances: vec![(1, 111 * balance_factor)],
+		..Default::default()
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	let mut ext: sp_io::TestExternalities = t.into();
 	ext.execute_with(|| {
 		SystemCallbacksCalled::set(0);
@@ -610,9 +613,12 @@ fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities {
 
 fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
-	pallet_balances::GenesisConfig::<Runtime> { balances: vec![(1, 111 * balance_factor)] }
-		.assimilate_storage(&mut t)
-		.unwrap();
+	pallet_balances::GenesisConfig::<Runtime> {
+		balances: vec![(1, 111 * balance_factor)],
+		..Default::default()
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	(t, sp_runtime::StateVersion::V0).into()
 }
 
diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs
index cf4f5f49240..67f7ee21e61 100644
--- a/substrate/frame/fast-unstake/src/mock.rs
+++ b/substrate/frame/fast-unstake/src/mock.rs
@@ -228,6 +228,7 @@ impl ExtBuilder {
 				.chain(validators_range.clone().map(|x| (x, 7 + 1 + 100)))
 				.chain(nominators_range.clone().map(|x| (x, 7 + 1 + 100)))
 				.collect::<Vec<_>>(),
+			..Default::default()
 		}
 		.assimilate_storage(&mut storage);
 
diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs
index 0a85d9ffd2b..cb754fb7955 100644
--- a/substrate/frame/grandpa/src/mock.rs
+++ b/substrate/frame/grandpa/src/mock.rs
@@ -226,7 +226,7 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx
 
 	let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect();
 
-	pallet_balances::GenesisConfig::<Test> { balances }
+	pallet_balances::GenesisConfig::<Test> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 
diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs
index 01bc312723a..c4c02a2834a 100644
--- a/substrate/frame/identity/src/tests.rs
+++ b/substrate/frame/identity/src/tests.rs
@@ -105,6 +105,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 			(account(20), 1000),
 			(account(30), 1000),
 		],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs
index 72bbc6dab4a..80d0a88881f 100644
--- a/substrate/frame/indices/src/mock.rs
+++ b/substrate/frame/indices/src/mock.rs
@@ -59,6 +59,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs
index b771ed0849f..ea3f69b6cfc 100644
--- a/substrate/frame/lottery/src/mock.rs
+++ b/substrate/frame/lottery/src/mock.rs
@@ -75,6 +75,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs
index 4065ce73f90..8a389314256 100644
--- a/substrate/frame/multisig/src/tests.rs
+++ b/substrate/frame/multisig/src/tests.rs
@@ -75,6 +75,7 @@ pub fn new_test_ext() -> TestState {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/nis/src/mock.rs b/substrate/frame/nis/src/mock.rs
index 08e69ef0de0..82b9f55b919 100644
--- a/substrate/frame/nis/src/mock.rs
+++ b/substrate/frame/nis/src/mock.rs
@@ -133,6 +133,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test, Instance1> {
 		balances: vec![(1, 100), (2, 100), (3, 100), (4, 100)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
index d943ba6f533..7aa8019b9c4 100644
--- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
+++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs
@@ -314,6 +314,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 
 	let _ = pallet_balances::GenesisConfig::<Runtime> {
 		balances: vec![(10, 100), (20, 100), (21, 100), (22, 100)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut storage)
 	.unwrap();
diff --git a/substrate/frame/preimage/src/mock.rs b/substrate/frame/preimage/src/mock.rs
index 9c72d09cae1..dec590c6a19 100644
--- a/substrate/frame/preimage/src/mock.rs
+++ b/substrate/frame/preimage/src/mock.rs
@@ -81,6 +81,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	let balances = pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)],
+		..Default::default()
 	};
 	balances.assimilate_storage(&mut t).unwrap();
 	t.into()
diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs
index afc668188e6..14389b03ac7 100644
--- a/substrate/frame/proxy/src/tests.rs
+++ b/substrate/frame/proxy/src/tests.rs
@@ -133,6 +133,7 @@ pub fn new_test_ext() -> TestState {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 3)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs
index 86f13b0da4f..446d507a414 100644
--- a/substrate/frame/recovery/src/mock.rs
+++ b/substrate/frame/recovery/src/mock.rs
@@ -78,6 +78,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs
index c96a50af865..5d36ce137d4 100644
--- a/substrate/frame/referenda/src/mock.rs
+++ b/substrate/frame/referenda/src/mock.rs
@@ -219,7 +219,7 @@ impl ExtBuilder {
 	pub fn build(self) -> sp_io::TestExternalities {
 		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 		let balances = vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100), (6, 100)];
-		pallet_balances::GenesisConfig::<Test> { balances }
+		pallet_balances::GenesisConfig::<Test> { balances, ..Default::default() }
 			.assimilate_storage(&mut t)
 			.unwrap();
 		let mut ext = sp_io::TestExternalities::new(t);
diff --git a/substrate/frame/revive/mock-network/src/lib.rs b/substrate/frame/revive/mock-network/src/lib.rs
index adfd0016b4d..b8c9bc13aa7 100644
--- a/substrate/frame/revive/mock-network/src/lib.rs
+++ b/substrate/frame/revive/mock-network/src/lib.rs
@@ -99,6 +99,7 @@ pub fn para_ext(para_id: u32) -> sp_io::TestExternalities {
 			(relay_sovereign_account_id(), INITIAL_BALANCE),
 			(BOB, INITIAL_BALANCE),
 		],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
@@ -137,6 +138,7 @@ pub fn relay_ext() -> sp_io::TestExternalities {
 			(parachain_sovereign_account_id(1), INITIAL_BALANCE),
 			(parachain_account_sovereign_account_id(1, ALICE), INITIAL_BALANCE),
 		],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs
index 90b9f053a03..d8b60e38da5 100644
--- a/substrate/frame/revive/src/tests.rs
+++ b/substrate/frame/revive/src/tests.rs
@@ -566,7 +566,7 @@ impl ExtBuilder {
 		sp_tracing::try_init_simple();
 		self.set_associated_consts();
 		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-		pallet_balances::GenesisConfig::<Test> { balances: vec![] }
+		pallet_balances::GenesisConfig::<Test> { balances: vec![], ..Default::default() }
 			.assimilate_storage(&mut t)
 			.unwrap();
 		let mut ext = sp_io::TestExternalities::new(t);
diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs
index 3f14dc00b56..9b319cabb09 100644
--- a/substrate/frame/root-offences/src/mock.rs
+++ b/substrate/frame/root-offences/src/mock.rs
@@ -212,6 +212,7 @@ impl ExtBuilder {
 				(31, self.balance_factor * 1000),
 				(41, self.balance_factor * 2000),
 			],
+			..Default::default()
 		}
 		.assimilate_storage(&mut storage)
 		.unwrap();
diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs
index aaf3456272f..2980f86abc2 100644
--- a/substrate/frame/safe-mode/src/mock.rs
+++ b/substrate/frame/safe-mode/src/mock.rs
@@ -233,6 +233,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	pallet_balances::GenesisConfig::<Test> {
 		// The 0 account is NOT a special origin, the rest may be.
 		balances: vec![(0, BAL_ACC0), (1, BAL_ACC1), (2, 5678), (3, 5678), (4, 5678)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/scored-pool/src/mock.rs b/substrate/frame/scored-pool/src/mock.rs
index 7708c06e56b..5eb9df52892 100644
--- a/substrate/frame/scored-pool/src/mock.rs
+++ b/substrate/frame/scored-pool/src/mock.rs
@@ -109,7 +109,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	balances.push((40, 500_000));
 	balances.push((99, 1));
 
-	pallet_balances::GenesisConfig::<Test> { balances }
+	pallet_balances::GenesisConfig::<Test> { balances, ..Default::default() }
 		.assimilate_storage(&mut t)
 		.unwrap();
 	pallet_scored_pool::GenesisConfig::<Test> {
diff --git a/substrate/frame/society/src/mock.rs b/substrate/frame/society/src/mock.rs
index 8cb5dc82375..63fc5059279 100644
--- a/substrate/frame/society/src/mock.rs
+++ b/substrate/frame/society/src/mock.rs
@@ -115,7 +115,7 @@ impl EnvBuilder {
 	pub fn execute<R, F: FnOnce() -> R>(mut self, f: F) -> R {
 		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 		self.balances.push((Society::account_id(), self.balance.max(self.pot)));
-		pallet_balances::GenesisConfig::<Test> { balances: self.balances }
+		pallet_balances::GenesisConfig::<Test> { balances: self.balances, ..Default::default() }
 			.assimilate_storage(&mut t)
 			.unwrap();
 		pallet_society::GenesisConfig::<Test> { pot: self.pot }
diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs
index 6346949576f..41fb3a31d52 100644
--- a/substrate/frame/staking/src/mock.rs
+++ b/substrate/frame/staking/src/mock.rs
@@ -471,6 +471,7 @@ impl ExtBuilder {
 				// This allows us to have a total_payout different from 0.
 				(999, 1_000_000_000_000),
 			],
+			..Default::default()
 		}
 		.assimilate_storage(&mut storage);
 
diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs
index 1dc1a3928f2..6e475b7067e 100644
--- a/substrate/frame/state-trie-migration/src/lib.rs
+++ b/substrate/frame/state-trie-migration/src/lib.rs
@@ -1297,9 +1297,12 @@ mod mock {
 			frame_system::GenesisConfig::<Test>::default()
 				.assimilate_storage(&mut custom_storage)
 				.unwrap();
-			pallet_balances::GenesisConfig::<Test> { balances: vec![(1, 1000)] }
-				.assimilate_storage(&mut custom_storage)
-				.unwrap();
+			pallet_balances::GenesisConfig::<Test> {
+				balances: vec![(1, 1000)],
+				..Default::default()
+			}
+			.assimilate_storage(&mut custom_storage)
+			.unwrap();
 		}
 
 		sp_tracing::try_init_simple();
diff --git a/substrate/frame/statement/src/mock.rs b/substrate/frame/statement/src/mock.rs
index 34afd332c08..db9d19dbbab 100644
--- a/substrate/frame/statement/src/mock.rs
+++ b/substrate/frame/statement/src/mock.rs
@@ -82,6 +82,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 				500000,
 			),
 		],
+		..Default::default()
 	};
 	balances.assimilate_storage(&mut t).unwrap();
 	t.into()
diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs
index 530efb708e4..b769ea5b3e7 100644
--- a/substrate/frame/tips/src/tests.rs
+++ b/substrate/frame/tips/src/tests.rs
@@ -180,7 +180,10 @@ impl Config<Instance1> for Test {
 pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut ext: sp_io::TestExternalities = RuntimeGenesisConfig {
 		system: frame_system::GenesisConfig::default(),
-		balances: pallet_balances::GenesisConfig { balances: vec![(0, 100), (1, 98), (2, 1)] },
+		balances: pallet_balances::GenesisConfig {
+			balances: vec![(0, 100), (1, 98), (2, 1)],
+			..Default::default()
+		},
 		treasury: Default::default(),
 		treasury_1: Default::default(),
 	}
@@ -583,6 +586,7 @@ fn genesis_funding_works() {
 	pallet_balances::GenesisConfig::<Test> {
 		// Total issuance will be 200 with treasury account initialized with 100.
 		balances: vec![(0, 100), (Treasury::account_id(), initial_funding)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
index 6ce4652fd42..76d46aa1647 100644
--- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
+++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
@@ -86,6 +86,7 @@ impl ExtBuilder {
 			} else {
 				vec![]
 			},
+			..Default::default()
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs
index 6de2e8e7da5..2aa5d8ec7be 100644
--- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs
+++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs
@@ -81,6 +81,7 @@ impl ExtBuilder {
 			} else {
 				vec![]
 			},
+			..Default::default()
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs
index bde1bf64728..8349df30667 100644
--- a/substrate/frame/transaction-payment/src/tests.rs
+++ b/substrate/frame/transaction-payment/src/tests.rs
@@ -99,6 +99,7 @@ impl ExtBuilder {
 			} else {
 				vec![]
 			},
+			..Default::default()
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
diff --git a/substrate/frame/transaction-storage/src/mock.rs b/substrate/frame/transaction-storage/src/mock.rs
index 84a77043d57..25f44b953bf 100644
--- a/substrate/frame/transaction-storage/src/mock.rs
+++ b/substrate/frame/transaction-storage/src/mock.rs
@@ -65,6 +65,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 		system: Default::default(),
 		balances: pallet_balances::GenesisConfig::<Test> {
 			balances: vec![(1, 1000000000), (2, 100), (3, 100), (4, 100)],
+			..Default::default()
 		},
 		transaction_storage: pallet_transaction_storage::GenesisConfig::<Test> {
 			storage_period: 10,
diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs
index e9efb7c0956..2c2ceac5862 100644
--- a/substrate/frame/treasury/src/tests.rs
+++ b/substrate/frame/treasury/src/tests.rs
@@ -221,6 +221,7 @@ impl ExtBuilder {
 		pallet_balances::GenesisConfig::<Test> {
 			// Total issuance will be 200 with treasury account initialized at ED.
 			balances: vec![(0, 100), (1, 98), (2, 1)],
+			..Default::default()
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
@@ -406,9 +407,12 @@ fn treasury_account_doesnt_get_deleted() {
 #[test]
 fn inexistent_account_works() {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
-	pallet_balances::GenesisConfig::<Test> { balances: vec![(0, 100), (1, 99), (2, 1)] }
-		.assimilate_storage(&mut t)
-		.unwrap();
+	pallet_balances::GenesisConfig::<Test> {
+		balances: vec![(0, 100), (1, 99), (2, 1)],
+		..Default::default()
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	// Treasury genesis config is not build thus treasury account does not exist
 	let mut t: sp_io::TestExternalities = t.into();
 
@@ -445,6 +449,7 @@ fn genesis_funding_works() {
 	pallet_balances::GenesisConfig::<Test> {
 		// Total issuance will be 200 with treasury account initialized with 100.
 		balances: vec![(0, 100), (Treasury::account_id(), initial_funding)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs
index fd9b3b552cc..d543f447ca7 100644
--- a/substrate/frame/tx-pause/src/mock.rs
+++ b/substrate/frame/tx-pause/src/mock.rs
@@ -157,6 +157,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	pallet_balances::GenesisConfig::<Test> {
 		// The 0 account is NOT a special origin. The rest may be:
 		balances: vec![(0, 1234), (1, 5678), (2, 5678), (3, 5678), (4, 5678)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs
index 274a90d77cf..d075ec1ff82 100644
--- a/substrate/frame/utility/src/tests.rs
+++ b/substrate/frame/utility/src/tests.rs
@@ -237,6 +237,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
 	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)],
+		..Default::default()
 	}
 	.assimilate_storage(&mut t)
 	.unwrap();
diff --git a/substrate/frame/vesting/src/mock.rs b/substrate/frame/vesting/src/mock.rs
index f0954a5b989..8fae9bbf749 100644
--- a/substrate/frame/vesting/src/mock.rs
+++ b/substrate/frame/vesting/src/mock.rs
@@ -94,6 +94,7 @@ impl ExtBuilder {
 				(12, 10 * self.existential_deposit),
 				(13, 9999 * self.existential_deposit),
 			],
+			..Default::default()
 		}
 		.assimilate_storage(&mut t)
 		.unwrap();
diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs
index 5c0c146d45a..e9a0e4815a2 100644
--- a/substrate/test-utils/runtime/src/genesismap.rs
+++ b/substrate/test-utils/runtime/src/genesismap.rs
@@ -130,7 +130,10 @@ impl GenesisStorageBuilder {
 				authorities: authorities_sr25519.clone(),
 				..Default::default()
 			},
-			balances: pallet_balances::GenesisConfig { balances: self.balances.clone() },
+			balances: pallet_balances::GenesisConfig {
+				balances: self.balances.clone(),
+				..Default::default()
+			},
 		}
 	}
 
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index 4d24354f99a..7c092f28516 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -1329,7 +1329,7 @@ mod tests {
 				.expect("default config is there");
 			let json = String::from_utf8(r.into()).expect("returned value is json. qed.");
 
-			let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c":[1,4],"allowed_slots":"PrimaryAndSecondaryVRFSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#;
+			let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":{"c":[1,4],"allowed_slots":"PrimaryAndSecondaryVRFSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[],"devAccounts":null}}"#;
 			assert_eq!(expected.to_string(), json);
 		}
 
-- 
GitLab


From 66bd26d35c21ad260120129776c86870ff1dd220 Mon Sep 17 00:00:00 2001
From: "Alisher A. Khassanov" <a.khssnv@gmail.com>
Date: Thu, 23 Jan 2025 16:01:55 +0500
Subject: [PATCH 102/116] Add `offchain_localStorageClear` RPC method (#7266)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Description

Closes https://github.com/paritytech/polkadot-sdk/issues/7265.

## Integration

Requires changes in
`https://github.com/polkadot-js/api/packages/{rpc-augment,types-support,types}`
to be visible in Polkadot/Substrate Portal and in other libraries where
we should explicitly state RPC methods.

Accompany PR to `polkadot-js/api`:
https://github.com/polkadot-js/api/pull/6070.

## Review Notes

Please put the right label on my PR.

---------

Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher <git@kchr.de>
---
 prdoc/pr_7266.prdoc                          | 13 +++++++++++++
 substrate/client/offchain/src/api.rs         |  8 +++++++-
 substrate/client/rpc-api/src/offchain/mod.rs |  4 ++++
 substrate/client/rpc/src/offchain/mod.rs     | 17 +++++++++++++++++
 substrate/client/rpc/src/offchain/tests.rs   | 13 ++++++++++++-
 5 files changed, 53 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_7266.prdoc

diff --git a/prdoc/pr_7266.prdoc b/prdoc/pr_7266.prdoc
new file mode 100644
index 00000000000..4fa7ddb7b41
--- /dev/null
+++ b/prdoc/pr_7266.prdoc
@@ -0,0 +1,13 @@
+title: Add `offchain_localStorageClear` RPC method
+doc:
+- audience: Node Operator
+  description: |-
+    Adds RPC method `offchain_localStorageClear` to clear the offchain local storage.
+crates:
+- name: sc-offchain
+  bump: minor
+- name: sc-rpc-api
+  bump: minor
+  validate: false
+- name: sc-rpc
+  bump: minor
diff --git a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs
index a5981f14c09..7d5c07deca4 100644
--- a/substrate/client/offchain/src/api.rs
+++ b/substrate/client/offchain/src/api.rs
@@ -375,7 +375,7 @@ mod tests {
 	}
 
 	#[test]
-	fn should_set_and_get_local_storage() {
+	fn should_set_get_and_clear_local_storage() {
 		// given
 		let kind = StorageKind::PERSISTENT;
 		let mut api = offchain_db();
@@ -387,6 +387,12 @@ mod tests {
 
 		// then
 		assert_eq!(api.local_storage_get(kind, key), Some(b"value".to_vec()));
+
+		// when
+		api.local_storage_clear(kind, key);
+
+		// then
+		assert_eq!(api.local_storage_get(kind, key), None);
 	}
 
 	#[test]
diff --git a/substrate/client/rpc-api/src/offchain/mod.rs b/substrate/client/rpc-api/src/offchain/mod.rs
index 4dd5b066d49..606d441231b 100644
--- a/substrate/client/rpc-api/src/offchain/mod.rs
+++ b/substrate/client/rpc-api/src/offchain/mod.rs
@@ -31,6 +31,10 @@ pub trait OffchainApi {
 	#[method(name = "offchain_localStorageSet", with_extensions)]
 	fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> Result<(), Error>;
 
+	/// Clear offchain local storage under given key and prefix.
+	#[method(name = "offchain_localStorageClear", with_extensions)]
+	fn clear_local_storage(&self, kind: StorageKind, key: Bytes) -> Result<(), Error>;
+
 	/// Get offchain local storage under given key and prefix.
 	#[method(name = "offchain_localStorageGet", with_extensions)]
 	fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> Result<Option<Bytes>, Error>;
diff --git a/substrate/client/rpc/src/offchain/mod.rs b/substrate/client/rpc/src/offchain/mod.rs
index af6bc1ba58c..f5b1b35be10 100644
--- a/substrate/client/rpc/src/offchain/mod.rs
+++ b/substrate/client/rpc/src/offchain/mod.rs
@@ -66,6 +66,23 @@ impl<T: OffchainStorage + 'static> OffchainApiServer for Offchain<T> {
 		Ok(())
 	}
 
+	fn clear_local_storage(
+		&self,
+		ext: &Extensions,
+		kind: StorageKind,
+		key: Bytes,
+	) -> Result<(), Error> {
+		check_if_safe(ext)?;
+
+		let prefix = match kind {
+			StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX,
+			StorageKind::LOCAL => return Err(Error::UnavailableStorageKind),
+		};
+		self.storage.write().remove(prefix, &key);
+
+		Ok(())
+	}
+
 	fn get_local_storage(
 		&self,
 		ext: &Extensions,
diff --git a/substrate/client/rpc/src/offchain/tests.rs b/substrate/client/rpc/src/offchain/tests.rs
index 41f22c2dc96..6b8225a7b5e 100644
--- a/substrate/client/rpc/src/offchain/tests.rs
+++ b/substrate/client/rpc/src/offchain/tests.rs
@@ -35,9 +35,14 @@ fn local_storage_should_work() {
 		Ok(())
 	);
 	assert_matches!(
-		offchain.get_local_storage(&ext, StorageKind::PERSISTENT, key),
+		offchain.get_local_storage(&ext, StorageKind::PERSISTENT, key.clone()),
 		Ok(Some(ref v)) if *v == value
 	);
+	assert_matches!(
+		offchain.clear_local_storage(&ext, StorageKind::PERSISTENT, key.clone()),
+		Ok(())
+	);
+	assert_matches!(offchain.get_local_storage(&ext, StorageKind::PERSISTENT, key), Ok(None));
 }
 
 #[test]
@@ -55,6 +60,12 @@ fn offchain_calls_considered_unsafe() {
 			assert_eq!(e.to_string(), "RPC call is unsafe to be called externally")
 		}
 	);
+	assert_matches!(
+		offchain.clear_local_storage(&ext, StorageKind::PERSISTENT, key.clone()),
+		Err(Error::UnsafeRpcCalled(e)) => {
+			assert_eq!(e.to_string(), "RPC call is unsafe to be called externally")
+		}
+	);
 	assert_matches!(
 		offchain.get_local_storage(&ext, StorageKind::PERSISTENT, key),
 		Err(Error::UnsafeRpcCalled(e)) => {
-- 
GitLab


From 085da479dee8e09ad3de83dbc59b304bd36b4ebe Mon Sep 17 00:00:00 2001
From: Branislav Kontur <bkontur@gmail.com>
Date: Thu, 23 Jan 2025 12:55:14 +0100
Subject: [PATCH 103/116] Bridges small nits/improvements (#7307)

This PR contains small fixes identified during work on the larger PR:
https://github.com/paritytech/polkadot-sdk/issues/6906.

---------

Co-authored-by: command-bot <>
---
 Cargo.lock                                       |  3 ---
 bridges/bin/runtime-common/Cargo.toml            |  2 --
 bridges/bin/runtime-common/src/integrity.rs      |  9 +++++----
 .../integration-tests/emulated/common/Cargo.toml |  1 -
 .../bridge-hubs/bridge-hub-rococo/Cargo.toml     |  2 --
 .../src/bridge_to_bulletin_config.rs             |  1 +
 .../src/bridge_to_westend_config.rs              |  1 +
 .../src/bridge_to_rococo_config.rs               |  1 +
 polkadot/xcm/xcm-builder/src/barriers.rs         |  2 +-
 prdoc/pr_7307.prdoc                              | 16 ++++++++++++++++
 10 files changed, 25 insertions(+), 13 deletions(-)
 create mode 100644 prdoc/pr_7307.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index a10def370be..5cc898714d3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2671,7 +2671,6 @@ version = "0.5.0"
 dependencies = [
  "bp-asset-hub-rococo",
  "bp-asset-hub-westend",
- "bp-bridge-hub-polkadot",
  "bp-bridge-hub-rococo",
  "bp-bridge-hub-westend",
  "bp-header-chain 0.7.0",
@@ -3018,7 +3017,6 @@ dependencies = [
  "bp-relayers 0.7.0",
  "bp-runtime 0.7.0",
  "bp-test-utils 0.7.0",
- "bp-xcm-bridge-hub 0.2.0",
  "frame-support 28.0.0",
  "frame-system 28.0.0",
  "log",
@@ -6436,7 +6434,6 @@ dependencies = [
  "asset-test-utils 7.0.0",
  "bp-messages 0.7.0",
  "bp-xcm-bridge-hub 0.2.0",
- "bridge-runtime-common 0.7.0",
  "cumulus-pallet-parachain-system 0.7.0",
  "cumulus-pallet-xcmp-queue 0.7.0",
  "cumulus-primitives-core 0.7.0",
diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml
index 49cd086fd3e..b5ec37a24a8 100644
--- a/bridges/bin/runtime-common/Cargo.toml
+++ b/bridges/bin/runtime-common/Cargo.toml
@@ -24,7 +24,6 @@ bp-parachains = { workspace = true }
 bp-polkadot-core = { workspace = true }
 bp-relayers = { workspace = true }
 bp-runtime = { workspace = true }
-bp-xcm-bridge-hub = { workspace = true }
 pallet-bridge-grandpa = { workspace = true }
 pallet-bridge-messages = { workspace = true }
 pallet-bridge-parachains = { workspace = true }
@@ -63,7 +62,6 @@ std = [
 	"bp-relayers/std",
 	"bp-runtime/std",
 	"bp-test-utils/std",
-	"bp-xcm-bridge-hub/std",
 	"codec/std",
 	"frame-support/std",
 	"frame-system/std",
diff --git a/bridges/bin/runtime-common/src/integrity.rs b/bridges/bin/runtime-common/src/integrity.rs
index 61dbf09109a..0fc377090cf 100644
--- a/bridges/bin/runtime-common/src/integrity.rs
+++ b/bridges/bin/runtime-common/src/integrity.rs
@@ -30,7 +30,6 @@ use pallet_bridge_messages::{ThisChainOf, WeightInfoExt as _};
 // Re-export to avoid include all dependencies everywhere.
 #[doc(hidden)]
 pub mod __private {
-	pub use bp_xcm_bridge_hub;
 	pub use static_assertions;
 }
 
@@ -66,9 +65,9 @@ macro_rules! assert_bridge_messages_pallet_types(
 		with_bridged_chain_messages_instance: $i:path,
 		this_chain: $this:path,
 		bridged_chain: $bridged:path,
+		expected_payload_type: $payload:path,
 	) => {
 		{
-			use $crate::integrity::__private::bp_xcm_bridge_hub::XcmAsPlainPayload;
 			use $crate::integrity::__private::static_assertions::assert_type_eq_all;
 			use bp_messages::ChainWithMessages;
 			use bp_runtime::Chain;
@@ -81,8 +80,8 @@ macro_rules! assert_bridge_messages_pallet_types(
 			assert_type_eq_all!(<$r as BridgeMessagesConfig<$i>>::ThisChain, $this);
 			assert_type_eq_all!(<$r as BridgeMessagesConfig<$i>>::BridgedChain, $bridged);
 
-			assert_type_eq_all!(<$r as BridgeMessagesConfig<$i>>::OutboundPayload, XcmAsPlainPayload);
-			assert_type_eq_all!(<$r as BridgeMessagesConfig<$i>>::InboundPayload, XcmAsPlainPayload);
+			assert_type_eq_all!(<$r as BridgeMessagesConfig<$i>>::OutboundPayload, $payload);
+			assert_type_eq_all!(<$r as BridgeMessagesConfig<$i>>::InboundPayload, $payload);
 		}
 	}
 );
@@ -97,6 +96,7 @@ macro_rules! assert_complete_bridge_types(
 		with_bridged_chain_messages_instance: $mi:path,
 		this_chain: $this:path,
 		bridged_chain: $bridged:path,
+		expected_payload_type: $payload:path,
 	) => {
 		$crate::assert_chain_types!(runtime: $r, this_chain: $this);
 		$crate::assert_bridge_messages_pallet_types!(
@@ -104,6 +104,7 @@ macro_rules! assert_complete_bridge_types(
 			with_bridged_chain_messages_instance: $mi,
 			this_chain: $this,
 			bridged_chain: $bridged,
+			expected_payload_type: $payload,
 		);
 	}
 );
diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml
index e921deb9c62..4bd45ef1a87 100644
--- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml
@@ -46,6 +46,5 @@ xcm-emulator = { workspace = true, default-features = true }
 # Bridges
 bp-messages = { workspace = true, default-features = true }
 bp-xcm-bridge-hub = { workspace = true, default-features = true }
-bridge-runtime-common = { workspace = true, default-features = true }
 pallet-bridge-messages = { workspace = true, default-features = true }
 pallet-xcm-bridge-hub = { workspace = true, default-features = true }
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index 3dba65ae99f..b3d48adfedc 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -86,7 +86,6 @@ testnet-parachains-constants = { features = ["rococo"], workspace = true }
 # Bridges
 bp-asset-hub-rococo = { workspace = true }
 bp-asset-hub-westend = { workspace = true }
-bp-bridge-hub-polkadot = { workspace = true }
 bp-bridge-hub-rococo = { workspace = true }
 bp-bridge-hub-westend = { workspace = true }
 bp-header-chain = { workspace = true }
@@ -132,7 +131,6 @@ default = ["std"]
 std = [
 	"bp-asset-hub-rococo/std",
 	"bp-asset-hub-westend/std",
-	"bp-bridge-hub-polkadot/std",
 	"bp-bridge-hub-rococo/std",
 	"bp-bridge-hub-westend/std",
 	"bp-header-chain/std",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
index 1e733503f43..1f58e9c2f2b 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
@@ -203,6 +203,7 @@ mod tests {
 			with_bridged_chain_messages_instance: WithRococoBulletinMessagesInstance,
 			this_chain: bp_bridge_hub_rococo::BridgeHubRococo,
 			bridged_chain: bp_polkadot_bulletin::PolkadotBulletin,
+			expected_payload_type: XcmAsPlainPayload,
 		);
 
 		// we can't use `assert_complete_bridge_constants` here, because there's a trick with
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs
index a14101eb454..d394af73e74 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs
@@ -295,6 +295,7 @@ mod tests {
 			with_bridged_chain_messages_instance: WithBridgeHubWestendMessagesInstance,
 			this_chain: bp_bridge_hub_rococo::BridgeHubRococo,
 			bridged_chain: bp_bridge_hub_westend::BridgeHubWestend,
+			expected_payload_type: XcmAsPlainPayload,
 		);
 
 		assert_complete_with_parachain_bridge_constants::<
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs
index 24e5482b7b0..a5fb33cf504 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs
@@ -323,6 +323,7 @@ mod tests {
 			with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance,
 			this_chain: bp_bridge_hub_westend::BridgeHubWestend,
 			bridged_chain: bp_bridge_hub_rococo::BridgeHubRococo,
+			expected_payload_type: XcmAsPlainPayload,
 		);
 
 		assert_complete_with_parachain_bridge_constants::<
diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs
index adba9a3ef79..27153a3f441 100644
--- a/polkadot/xcm/xcm-builder/src/barriers.rs
+++ b/polkadot/xcm/xcm-builder/src/barriers.rs
@@ -490,7 +490,7 @@ impl ShouldExecute for DenyReserveTransferToRelayChain {
 					if matches!(origin, Location { parents: 1, interior: Here }) =>
 				{
 					log::warn!(
-						target: "xcm::barrier",
+						target: "xcm::barriers",
 						"Unexpected ReserveAssetDeposited from the Relay Chain",
 					);
 					Ok(ControlFlow::Continue(()))
diff --git a/prdoc/pr_7307.prdoc b/prdoc/pr_7307.prdoc
new file mode 100644
index 00000000000..b27aace0bd1
--- /dev/null
+++ b/prdoc/pr_7307.prdoc
@@ -0,0 +1,16 @@
+title: Bridges small nits/improvements
+doc:
+- audience: Runtime Dev
+  description: |
+      This PR introduces a new `expected_payload_type` parameter to the Bridges `assert_complete_bridge_types` macro.
+crates:
+- name: bridge-runtime-common
+  bump: patch
+- name: bridge-hub-rococo-runtime
+  bump: patch
+- name: bridge-hub-westend-runtime
+  bump: patch
+- name: staging-xcm-builder
+  bump: patch
+- name: emulated-integration-tests-common
+  bump: patch
-- 
GitLab


From cfc5b6f59a1fa46aa55144bff5eb7fca14e27e2b Mon Sep 17 00:00:00 2001
From: Alin Dima <alin@parity.io>
Date: Thu, 23 Jan 2025 15:00:31 +0200
Subject: [PATCH 104/116] bump lookahead to 3 for testnet genesis (#7252)

This is the right value after
https://github.com/paritytech/polkadot-sdk/pull/4880, which corresponds
to an allowedAncestryLen of 2 (which is the default)

Will fix https://github.com/paritytech/polkadot-sdk/issues/7105
---
 polkadot/runtime/rococo/src/genesis_config_presets.rs         | 2 +-
 polkadot/runtime/westend/src/genesis_config_presets.rs        | 2 +-
 .../tests/elastic_scaling/doesnt_break_parachains.rs          | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/polkadot/runtime/rococo/src/genesis_config_presets.rs b/polkadot/runtime/rococo/src/genesis_config_presets.rs
index a96a509b0e4..83bd1fbbc8f 100644
--- a/polkadot/runtime/rococo/src/genesis_config_presets.rs
+++ b/polkadot/runtime/rococo/src/genesis_config_presets.rs
@@ -134,7 +134,7 @@ fn default_parachains_host_configuration(
 				1u8 << (FeatureIndex::CandidateReceiptV2 as usize),
 		),
 		scheduler_params: SchedulerParams {
-			lookahead: 2,
+			lookahead: 3,
 			group_rotation_frequency: 20,
 			paras_availability_period: 4,
 			..Default::default()
diff --git a/polkadot/runtime/westend/src/genesis_config_presets.rs b/polkadot/runtime/westend/src/genesis_config_presets.rs
index ea5aff554e8..729df20b3c6 100644
--- a/polkadot/runtime/westend/src/genesis_config_presets.rs
+++ b/polkadot/runtime/westend/src/genesis_config_presets.rs
@@ -137,7 +137,7 @@ fn default_parachains_host_configuration(
 				1u8 << (FeatureIndex::CandidateReceiptV2 as usize),
 		),
 		scheduler_params: SchedulerParams {
-			lookahead: 2,
+			lookahead: 3,
 			group_rotation_frequency: 20,
 			paras_availability_period: 4,
 			..Default::default()
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
index f83400d2b22..e65029d7095 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
@@ -120,8 +120,8 @@ async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> {
 	assert_eq!(
 		cq,
 		[
-			(CoreIndex(0), [para_id, para_id].into_iter().collect()),
-			(CoreIndex(1), [para_id, para_id].into_iter().collect()),
+			(CoreIndex(0), std::iter::repeat(para_id).take(3).collect()),
+			(CoreIndex(1), std::iter::repeat(para_id).take(3).collect()),
 		]
 		.into_iter()
 		.collect()
-- 
GitLab


From 6091330ae6d799bcf34d366acda7aff91c609ab1 Mon Sep 17 00:00:00 2001
From: Maksym H <1177472+mordamax@users.noreply.github.com>
Date: Thu, 23 Jan 2025 13:30:26 +0000
Subject: [PATCH 105/116] Refactor command bot and drop rejecting non
 paritytech members (#7231)

Aims to
- close #7049
- close https://github.com/paritytech/opstooling/issues/449
- close https://github.com/paritytech/opstooling/issues/463

What's changed:
- removed is paritytech member check as required prerequisite to run a
command
- run the cmd.py script taking it from master if the person who runs it
is not a member of paritytech, and from the current branch if they are a member.
That keeps the developer experience & easy testing if paritytech members
are contributing to cmd.py
- isolate the cmd job from being able to access GH App token or PR
token — currently the fmt/bench/prdoc are being run with limited
permissions scope, just to generate output, which then is uploaded to
artifacts, and then the other job which doesn't run any files from repo,
does a commit/push more securely
---
 .github/workflows/cmd.yml | 428 +++++++++++++++++++++-----------------
 1 file changed, 240 insertions(+), 188 deletions(-)

diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml
index 42b2eab3b9e..50e71f2699d 100644
--- a/.github/workflows/cmd.yml
+++ b/.github/workflows/cmd.yml
@@ -5,7 +5,7 @@ on:
     types: [created]
 
 permissions: # allow the action to comment on the PR
-  contents: write
+  contents: read
   issues: write
   pull-requests: write
   actions: read
@@ -55,38 +55,9 @@ jobs:
 
             return 'false';
 
-  reject-non-members:
-    needs: is-org-member
-    if: ${{ startsWith(github.event.comment.body, '/cmd') && needs.is-org-member.outputs.member != 'true' }}
-    runs-on: ubuntu-latest
-    steps:
-      - name: Add reaction to rejected comment
-        uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            github.rest.reactions.createForIssueComment({
-              comment_id: ${{ github.event.comment.id }},
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              content: 'confused'
-            })
-
-      - name: Comment PR (Rejected)
-        uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            github.rest.issues.createComment({
-              issue_number: context.issue.number,
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              body: `Sorry, only members of the organization ${{ github.event.repository.owner.login }} members can run commands.`
-            })
 
   acknowledge:
-    needs: is-org-member
-    if: ${{ startsWith(github.event.comment.body, '/cmd') && needs.is-org-member.outputs.member == 'true' }}
+    if: ${{ startsWith(github.event.comment.body, '/cmd') }}
     runs-on: ubuntu-latest
     steps:
       - name: Add reaction to triggered comment
@@ -102,12 +73,11 @@ jobs:
             })
 
   clean:
-    needs: is-org-member
     runs-on: ubuntu-latest
     steps:
       - name: Clean previous comments
-        if: ${{ startsWith(github.event.comment.body, '/cmd') && contains(github.event.comment.body, '--clean') && needs.is-org-member.outputs.member == 'true' }}
         uses: actions/github-script@v7
+        if: ${{ startsWith(github.event.comment.body, '/cmd') && contains(github.event.comment.body, '--clean') }}
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
@@ -139,25 +109,72 @@ jobs:
                 }
               }
             })
-  help:
-    needs: [clean, is-org-member]
-    if: ${{ startsWith(github.event.comment.body, '/cmd') && contains(github.event.comment.body, '--help') && needs.is-org-member.outputs.member == 'true' }}
+
+  get-pr-info:
+    if: ${{ startsWith(github.event.comment.body, '/cmd') }}
     runs-on: ubuntu-latest
+    outputs:
+      CMD: ${{ steps.get-comment.outputs.group2 }}
+      pr-branch: ${{ steps.get-pr.outputs.pr_branch }}
+      repo: ${{ steps.get-pr.outputs.repo }}
     steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-
       - name: Get command
         uses: actions-ecosystem/action-regex-match@v2
-        id: get-pr-comment
+        id: get-comment
         with:
           text: ${{ github.event.comment.body }}
           regex: "^(\\/cmd )([-\\/\\s\\w.=:]+)$" # see explanation in docs/contributor/commands-readme.md#examples
+      
+      # Get PR branch name, because the issue_comment event does not contain the PR branch name
+      - name: Check if the issue is a PR
+        id: check-pr
+        run: |
+          if [ -n "${{ github.event.issue.pull_request.url }}" ]; then
+            echo "This is a pull request comment"
+          else
+            echo "This is not a pull request comment"
+            exit 1
+          fi
+
+      - name: Get PR Branch Name and Repo
+        if: steps.check-pr.outcome == 'success'
+        id: get-pr
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const pr = await github.rest.pulls.get({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              pull_number: context.issue.number,
+            });
+            const prBranch = pr.data.head.ref;
+            const repo = pr.data.head.repo.full_name;
+            console.log(prBranch, repo)
+            core.setOutput('pr_branch', prBranch);
+            core.setOutput('repo', repo);
+
+      - name: Use PR Branch Name and Repo
+        env:
+          PR_BRANCH: ${{ steps.get-pr.outputs.pr_branch }}
+          REPO: ${{ steps.get-pr.outputs.repo }}
+          CMD: ${{ steps.get-comment.outputs.group2 }}
+        run: |
+          echo "The PR branch is $PR_BRANCH"
+          echo "The repository is $REPO"
+          echo "The CMD is $CMD"
+
+  help:
+    needs: [clean, get-pr-info]
+    if: ${{ startsWith(github.event.comment.body, '/cmd') && contains(github.event.comment.body, '--help') }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
 
       - name: Save output of help
         id: help
         env:
-          CMD: ${{ steps.get-pr-comment.outputs.group2 }} # to avoid "" around the command
+          CMD: ${{ needs.get-pr-info.outputs.CMD }} # to avoid "" around the command
         run: |
           python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt
           echo 'help<<EOF' >> $GITHUB_OUTPUT
@@ -209,9 +226,11 @@ jobs:
             })
 
   set-image:
-    needs: [clean, is-org-member]
-    if: ${{ startsWith(github.event.comment.body, '/cmd') && !contains(github.event.comment.body, '--help') && needs.is-org-member.outputs.member == 'true' }}
+    needs: [clean, get-pr-info]
+    if: ${{ startsWith(github.event.comment.body, '/cmd') && !contains(github.event.comment.body, '--help') }}
     runs-on: ubuntu-latest
+    env: 
+      CMD: ${{ needs.get-pr-info.outputs.CMD }}
     outputs:
       IMAGE: ${{ steps.set-image.outputs.IMAGE }}
       RUNNER: ${{ steps.set-image.outputs.RUNNER }}
@@ -221,7 +240,7 @@ jobs:
 
       - id: set-image
         run: |
-          BODY=$(echo "${{ github.event.comment.body }}" | xargs)
+          BODY=$(echo "$CMD" | xargs) # remove whitespace
           IMAGE_OVERRIDE=$(echo $BODY | grep -oe 'docker.io/paritytech/ci-unified:.*\s' | xargs)
 
           cat .github/env >> $GITHUB_OUTPUT
@@ -243,87 +262,17 @@ jobs:
           echo "RUNNER=${{ steps.set-image.outputs.RUNNER }}"
           echo "IMAGE=${{ steps.set-image.outputs.IMAGE }}"
 
-  # Get PR branch name, because the issue_comment event does not contain the PR branch name
-  get-pr-branch:
-    needs: [set-image]
+  before-cmd:
+    needs: [set-image, get-pr-info]
     runs-on: ubuntu-latest
-    outputs:
-      pr-branch: ${{ steps.get-pr.outputs.pr_branch }}
-      repo: ${{ steps.get-pr.outputs.repo }}
-    steps:
-      - name: Check if the issue is a PR
-        id: check-pr
-        run: |
-          if [ -n "${{ github.event.issue.pull_request.url }}" ]; then
-            echo "This is a pull request comment"
-          else
-            echo "This is not a pull request comment"
-            exit 1
-          fi
-
-      - name: Get PR Branch Name and Repo
-        if: steps.check-pr.outcome == 'success'
-        id: get-pr
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const pr = await github.rest.pulls.get({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              pull_number: context.issue.number,
-            });
-            const prBranch = pr.data.head.ref;
-            const repo = pr.data.head.repo.full_name;
-            console.log(prBranch, repo)
-            core.setOutput('pr_branch', prBranch);
-            core.setOutput('repo', repo);
-
-      - name: Use PR Branch Name and Repo
-        run: |
-          echo "The PR branch is ${{ steps.get-pr.outputs.pr_branch }}"
-          echo "The repository is ${{ steps.get-pr.outputs.repo }}"
-
-  cmd:
-    needs: [set-image, get-pr-branch]
     env:
       JOB_NAME: "cmd"
-    runs-on: ${{ needs.set-image.outputs.RUNNER }}
-    container:
-      image: ${{ needs.set-image.outputs.IMAGE }}
-    timeout-minutes: 1440 # 24 hours per runtime
+      CMD: ${{ needs.get-pr-info.outputs.CMD }}
+      PR_BRANCH: ${{ needs.get-pr-info.outputs.pr-branch }}
+    outputs:
+      job_url: ${{ steps.build-link.outputs.job_url }}
+      run_url: ${{ steps.build-link.outputs.run_url }}
     steps:
-      - name: Generate token
-        uses: actions/create-github-app-token@v1
-        id: generate_token
-        with:
-          app-id: ${{ secrets.CMD_BOT_APP_ID }}
-          private-key: ${{ secrets.CMD_BOT_APP_KEY }}
-
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          token: ${{ steps.generate_token.outputs.token }}
-          repository: ${{ needs.get-pr-branch.outputs.repo }}
-          ref: ${{ needs.get-pr-branch.outputs.pr-branch }}
-
-      - name: Get command
-        uses: actions-ecosystem/action-regex-match@v2
-        id: get-pr-comment
-        with:
-          text: ${{ github.event.comment.body }}
-          regex: "^(\\/cmd )([-\\/\\s\\w.=:]+)$" # see explanation in docs/contributor/commands-readme.md#examples
-
-      # In order to run prdoc without specifying the PR number, we need to add the PR number as an argument automatically
-      - name: Prepare PR Number argument
-        id: pr-arg
-        run: |
-          CMD="${{ steps.get-pr-comment.outputs.group2 }}"
-          if echo "$CMD" | grep -q "prdoc" && ! echo "$CMD" | grep -qE "\-\-pr[[:space:]=][0-9]+"; then
-            echo "arg=--pr ${{ github.event.issue.number }}" >> $GITHUB_OUTPUT
-          else
-            echo "arg=" >> $GITHUB_OUTPUT
-          fi
-
       - name: Build workflow link
         if: ${{ !contains(github.event.comment.body, '--quiet') }}
         id: build-link
@@ -346,40 +295,90 @@ jobs:
 
       - name: Comment PR (Start)
         # No need to comment on prdoc start or if --quiet
-        if: ${{ !contains(github.event.comment.body, '--quiet') && !contains(github.event.comment.body, 'prdoc') }}
+        if: ${{ !contains(github.event.comment.body, '--quiet') && !startsWith(needs.get-pr-info.outputs.CMD, 'prdoc') && !startsWith(needs.get-pr-info.outputs.CMD, 'fmt')}}
         uses: actions/github-script@v7
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
             let job_url = ${{ steps.build-link.outputs.job_url }}
-
+            let cmd = process.env.CMD;
             github.rest.issues.createComment({
               issue_number: context.issue.number,
               owner: context.repo.owner,
               repo: context.repo.repo,
-              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has started 🚀 [See logs here](${job_url})`
+              body: `Command "${cmd}" has started 🚀 [See logs here](${job_url})`
             })
+
+  cmd:
+    needs: [before-cmd, set-image, get-pr-info, is-org-member]
+    env:
+      CMD: ${{ needs.get-pr-info.outputs.CMD }}
+      PR_BRANCH: ${{ needs.get-pr-info.outputs.pr-branch }}
+    runs-on: ${{ needs.set-image.outputs.RUNNER }}
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    timeout-minutes: 1440 # 24 hours per runtime
+    # lower the permissions to a separate, restricted permissions context for parts executable by contributors
+    permissions:
+      contents: read
+      pull-requests: none
+      actions: none
+      issues: none
+    outputs:
+      cmd_output: ${{ steps.cmd.outputs.cmd_output }}
+      subweight: ${{ steps.subweight.outputs.result }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          repository: ${{ needs.get-pr-info.outputs.repo }}
+          ref: ${{ needs.get-pr-info.outputs.pr-branch }}
+
+      # In order to run prdoc without specifying the PR number, we need to add the PR number as an argument automatically
+      - name: Prepare PR Number argument
+        id: pr-arg
+        run: |
+          CMD="${{ needs.get-pr-info.outputs.CMD }}"
+          if echo "$CMD" | grep -q "prdoc" && ! echo "$CMD" | grep -qE "\-\-pr[[:space:]=][0-9]+"; then
+            echo "arg=--pr ${{ github.event.issue.number }}" >> $GITHUB_OUTPUT
+          else
+            echo "arg=" >> $GITHUB_OUTPUT
+          fi
       
       - name: Install dependencies for bench
-        if: startsWith(steps.get-pr-comment.outputs.group2, 'bench')
+        if: startsWith(needs.get-pr-info.outputs.CMD, 'bench')
         run: |
-          cargo install subweight --locked
           cargo install --path substrate/utils/frame/omni-bencher --locked
 
       - name: Run cmd
         id: cmd
         env:
-          CMD: ${{ steps.get-pr-comment.outputs.group2 }} # to avoid "" around the command
           PR_ARG: ${{ steps.pr-arg.outputs.arg }}
+          IS_ORG_MEMBER: ${{ needs.is-org-member.outputs.member }}
         run: |
           echo "Running command: '$CMD $PR_ARG' on '${{ needs.set-image.outputs.RUNNER }}' runner, container: '${{ needs.set-image.outputs.IMAGE }}'"
           echo "RUST_NIGHTLY_VERSION: $RUST_NIGHTLY_VERSION"
-          # Fixes "detected dubious ownership" error in the ci
-          git config --global --add safe.directory '*'
-          git remote -v
-          cat /proc/cpuinfo
-          python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt
-          python3 .github/scripts/cmd/cmd.py $CMD $PR_ARG
+          echo "IS_ORG_MEMBER: $IS_ORG_MEMBER"
+
+          git config --global --add safe.directory $GITHUB_WORKSPACE
+          git config user.name "cmd[bot]"
+          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+            
+          
+          # if the user is not an org member, we need to use the bot's path from master to avoid unwanted modifications
+          if [ "$IS_ORG_MEMBER" = "true" ]; then
+            # safe to run commands from current branch
+            BOT_PATH=.github
+          else
+            # going to run commands from master
+            TMP_DIR=/tmp/polkadot-sdk
+            git clone --depth 1 --branch master https://github.com/paritytech/polkadot-sdk $TMP_DIR
+            BOT_PATH=$TMP_DIR/.github
+          fi
+
+          # install deps and run a command from master
+          python3 -m pip install -r $BOT_PATH/scripts/generate-prdoc.requirements.txt
+          python3 $BOT_PATH/scripts/cmd/cmd.py $CMD $PR_ARG
           git status
           git diff
 
@@ -393,6 +392,11 @@ jobs:
             echo 'EOF' >> $GITHUB_OUTPUT
           fi
 
+          git add -A
+          git diff HEAD > /tmp/cmd/command_diff.patch -U0
+          git commit -m "tmp cmd: $CMD" || true
+          # without push, as we're saving the diff to an artifact and subweight will compare the local branch with the remote branch
+
       - name: Upload command output
         if: ${{ always() }}
         uses: actions/upload-artifact@v4
@@ -400,38 +404,100 @@ jobs:
           name: command-output
           path: /tmp/cmd/command_output.log
 
-      # Generate token for commit, as the earlier token expires after 1 hour, while cmd can take longer
-      - name: Generate token for commit
-        uses: actions/create-github-app-token@v1
-        id: generate_token_commit
+      - name: Upload command diff
+        uses: actions/upload-artifact@v4
+        with:
+          name: command-diff
+          path: /tmp/cmd/command_diff.patch
+
+      - name: Install subweight for bench
+        if: startsWith(needs.get-pr-info.outputs.CMD, 'bench')
+        run: cargo install subweight
+
+      - name: Run Subweight for bench
+        id: subweight
+        if: startsWith(needs.get-pr-info.outputs.CMD, 'bench')
+        shell: bash
+        run: |
+          git fetch
+          git remote -v
+          echo $(git log -n 2 --oneline)
+
+          result=$(subweight compare commits \
+            --path-pattern "./**/weights/**/*.rs,./**/weights.rs" \
+            --method asymptotic \
+            --format markdown \
+            --no-color \
+            --change added changed \
+            --ignore-errors \
+            refs/remotes/origin/master $PR_BRANCH)
+
+          # Save the multiline result to the output
+          {
+            echo "result<<EOF"
+            echo "$result"
+            echo "EOF"
+          } >> $GITHUB_OUTPUT
+
+  after-cmd:
+    needs: [cmd, get-pr-info, before-cmd]
+    env:
+      CMD: ${{ needs.get-pr-info.outputs.CMD }}
+      PR_BRANCH: ${{ needs.get-pr-info.outputs.pr-branch }}
+    runs-on: ubuntu-latest
+    steps:
+      # needs to be able to trigger CI, as default token does not retrigger
+      - uses: actions/create-github-app-token@v1
+        id: generate_token
         with:
           app-id: ${{ secrets.CMD_BOT_APP_ID }}
           private-key: ${{ secrets.CMD_BOT_APP_KEY }}
 
-      - name: Commit changes
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          token: ${{ steps.generate_token.outputs.token }}
+          repository: ${{ needs.get-pr-info.outputs.repo }}
+          ref: ${{ needs.get-pr-info.outputs.pr-branch }}
+
+      - name: Download all artifacts
+        uses: actions/download-artifact@v4
+        with: 
+          name: command-diff
+          path: command-diff
+
+      - name: Apply & Commit changes
         run: |
+          ls -lsa .
+
+          git config --global --add safe.directory $GITHUB_WORKSPACE
+          git config user.name "cmd[bot]"
+          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+          git config --global pull.rebase false
+          
+          echo "Applying command-diff/command_diff.patch"
+          git apply "command-diff/command_diff.patch" --unidiff-zero --allow-empty
+
+          rm -rf command-diff
+
+          git status
+          
           if [ -n "$(git status --porcelain)" ]; then
-            git config --global user.name command-bot
-            git config --global user.email "<>"
-            git config --global pull.rebase false
 
-            # Push the results to the target branch
-            git remote add \
-              github \
-              "https://x-access-token:${{ steps.generate_token_commit.outputs.token }}@github.com/${{ needs.get-pr-branch.outputs.repo }}.git" || :
+            git remote -v
 
             push_changes() {
-              git push github "HEAD:${{ needs.get-pr-branch.outputs.pr-branch }}"
+              git push origin "HEAD:$PR_BRANCH"
             }
 
             git add .
             git restore --staged Cargo.lock # ignore changes in Cargo.lock
-            git commit -m "Update from ${{ github.actor }} running command '${{ steps.get-pr-comment.outputs.group2 }}'" || true
+            git commit -m "Update from ${{ github.actor }} running command '$CMD'" || true
             
             # Attempt to push changes
             if ! push_changes; then
               echo "Push failed, trying to rebase..."
-              git pull --rebase github "${{ needs.get-pr-branch.outputs.pr-branch }}"
+              git pull --rebase origin $PR_BRANCH
               # After successful rebase, try pushing again
               push_changes
             fi
@@ -439,41 +505,20 @@ jobs:
             echo "Nothing to commit";
           fi
 
-      - name: Run Subweight
-        id: subweight
-        if: startsWith(steps.get-pr-comment.outputs.group2, 'bench')
-        shell: bash
-        run: |
-          git fetch
-          result=$(subweight compare commits \
-            --path-pattern "./**/weights/**/*.rs,./**/weights.rs" \
-            --method asymptotic \
-            --format markdown \
-            --no-color \
-            --change added changed \
-            --ignore-errors \
-            refs/remotes/origin/master refs/heads/${{ needs.get-pr-branch.outputs.pr-branch }})
-
-          # Save the multiline result to the output
-          {
-            echo "result<<EOF"
-            echo "$result"
-            echo "EOF"
-          } >> $GITHUB_OUTPUT
-
       - name: Comment PR (End)
         # No need to comment on prdoc success or --quiet
-        if: ${{ !failure() && !contains(github.event.comment.body, '--quiet') && !contains(github.event.comment.body, 'prdoc') }}
+        if: ${{ needs.cmd.result == 'success' && !contains(github.event.comment.body, '--quiet') && !startsWith(needs.get-pr-info.outputs.CMD, 'prdoc') && !startsWith(needs.get-pr-info.outputs.CMD, 'fmt') }}
         uses: actions/github-script@v7
         env:
-          SUBWEIGHT: "${{ steps.subweight.outputs.result }}"
-          CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}"
+          SUBWEIGHT: "${{ needs.cmd.outputs.subweight }}"
+          CMD_OUTPUT: "${{ needs.cmd.outputs.cmd_output }}"
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
-            let runUrl = ${{ steps.build-link.outputs.run_url }}
-            let subweight = process.env.SUBWEIGHT;
-            let cmdOutput = process.env.CMD_OUTPUT;
+            let runUrl = ${{ needs.before-cmd.outputs.run_url }}
+            let subweight = process.env.SUBWEIGHT || '';
+            let cmdOutput = process.env.CMD_OUTPUT || '';
+            let cmd = process.env.CMD;
             console.log(cmdOutput);
 
             let subweightCollapsed = subweight.trim() !== '' 
@@ -488,34 +533,41 @@ jobs:
               issue_number: context.issue.number,
               owner: context.repo.owner,
               repo: context.repo.repo,
-              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}${cmdOutputCollapsed}`
+              body: `Command "${cmd}" has finished ✅ [See logs here](${runUrl})${subweightCollapsed}${cmdOutputCollapsed}`
             })
 
+  finish:
+    needs: [get-pr-info, before-cmd, after-cmd, cmd]
+    if: ${{ always() }}
+    runs-on: ubuntu-latest
+    env:
+      CMD_OUTPUT: "${{ needs.cmd.outputs.cmd_output }}"
+      CMD: ${{ needs.get-pr-info.outputs.CMD }}
+    steps:
       - name: Comment PR (Failure)
-        if: ${{ failure() && !contains(github.event.comment.body, '--quiet') }}
+        if: ${{ needs.cmd.result == 'failure' || needs.after-cmd.result == 'failure' }}
         uses: actions/github-script@v7
-        env:
-          CMD_OUTPUT: "${{ steps.cmd.outputs.cmd_output }}"
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
-            let jobUrl = ${{ steps.build-link.outputs.job_url }}
+            let jobUrl = ${{ needs.before-cmd.outputs.job_url }}
             let cmdOutput = process.env.CMD_OUTPUT;
-
-            let cmdOutputCollapsed = cmdOutput.trim() !== ''
-              ? `<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>` 
-              : '';
+            let cmd = process.env.CMD;
+            let cmdOutputCollapsed = '';
+            if (cmdOutput && cmdOutput.trim() !== '') {
+              cmdOutputCollapsed = `<details>\n\n<summary>Command output:</summary>\n\n${cmdOutput}\n\n</details>` 
+            }
 
             github.rest.issues.createComment({
               issue_number: context.issue.number,
               owner: context.repo.owner,
               repo: context.repo.repo,
-              body: `Command "${{ steps.get-pr-comment.outputs.group2 }}" has failed ❌! [See logs here](${jobUrl})${cmdOutputCollapsed}`
+              body: `Command "${cmd}" has failed ❌! [See logs here](${jobUrl})${cmdOutputCollapsed}`
             })
 
       - name: Add 😕 reaction on failure
+        if: ${{ needs.cmd.result == 'failure' || needs.after-cmd.result == 'failure' }}
         uses: actions/github-script@v7
-        if: ${{ failure() }}
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
@@ -527,8 +579,8 @@ jobs:
             })
 
       - name: Add 👍 reaction on success
+        if: ${{ needs.cmd.result == 'success' && needs.after-cmd.result == 'success' }}
         uses: actions/github-script@v7
-        if: ${{ !failure() }}
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
-- 
GitLab


From 3a7f3c0af63b1a7566ca29c59fa4ac274bd911f1 Mon Sep 17 00:00:00 2001
From: Maksym H <1177472+mordamax@users.noreply.github.com>
Date: Thu, 23 Jan 2025 16:08:32 +0000
Subject: [PATCH 106/116] Fix setting the image properly (#7315)

Fixed the condition that sets the weights/large runner images.
---
 .github/workflows/cmd.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml
index 50e71f2699d..3d4779064a4 100644
--- a/.github/workflows/cmd.yml
+++ b/.github/workflows/cmd.yml
@@ -250,9 +250,9 @@ jobs:
               echo "IMAGE=$IMAGE" >> $GITHUB_OUTPUT
           fi
 
-          if [[ $BODY == "/cmd bench"* ]]; then
+          if [[ $BODY == "bench"* ]]; then
               echo "RUNNER=parity-weights" >> $GITHUB_OUTPUT
-          elif [[ $BODY == "/cmd update-ui"* ]]; then
+          elif [[ $BODY == "update-ui"* ]]; then
               echo "RUNNER=parity-large" >> $GITHUB_OUTPUT
           else
               echo "RUNNER=ubuntu-latest" >> $GITHUB_OUTPUT
-- 
GitLab


From e9393a9afc3b33cc2d01b7820a8f186434196758 Mon Sep 17 00:00:00 2001
From: Andrei Sandu <54316454+sandreim@users.noreply.github.com>
Date: Thu, 23 Jan 2025 18:53:27 +0200
Subject: [PATCH 107/116] Deprecate ParaBackingState API (#6867)

Currently the `para_backing_state` API is used only by the prospective
parachains subsystem and returns two things: the constraints for
parachain blocks and the candidates pending availability.

This PR deprecates `para_backing_state` and introduces a new
`backing_constraints` API that can be used together with
`candidates_pending_availability` to get the same information provided
by `para_backing_state`.

TODO:
- [x] PRDoc

---------

Signed-off-by: Andrei Sandu <andrei-mihail@parity.io>
Co-authored-by: command-bot <>
---
 .../src/blockchain_rpc_client.rs              |  12 +-
 .../src/rpc_client.rs                         |  13 +-
 .../emulated/chains/relays/rococo/src/lib.rs  |   2 +-
 .../emulated/chains/relays/westend/src/lib.rs |   2 +-
 .../src/fragment_chain/mod.rs                 |  25 +-
 .../src/fragment_chain/tests.rs               |   1 +
 .../core/prospective-parachains/src/lib.rs    |  89 ++++-
 .../core/prospective-parachains/src/tests.rs  | 369 +++++++++++++++---
 polkadot/node/core/runtime-api/src/cache.rs   |  24 +-
 polkadot/node/core/runtime-api/src/lib.rs     |  13 +
 polkadot/node/core/runtime-api/src/tests.rs   |  12 +-
 polkadot/node/subsystem-types/src/messages.rs |  10 +-
 .../subsystem-types/src/runtime_client.rs     |  23 +-
 .../src/inclusion_emulator/mod.rs             | 139 +++++--
 polkadot/node/subsystem-util/src/lib.rs       |   7 +-
 polkadot/primitives/src/runtime_api.rs        |  10 +-
 .../primitives/src/vstaging/async_backing.rs  |  40 +-
 polkadot/primitives/src/vstaging/mod.rs       |   9 +-
 .../node/backing/prospective-parachains.md    |   3 +
 .../parachains/src/runtime_api_impl/v11.rs    |  19 +-
 .../src/runtime_api_impl/vstaging.rs          |  30 ++
 polkadot/runtime/rococo/src/lib.rs            |  15 +-
 polkadot/runtime/test-runtime/src/lib.rs      |   1 +
 polkadot/runtime/westend/src/lib.rs           |  15 +-
 prdoc/pr_6867.prdoc                           |  30 ++
 25 files changed, 758 insertions(+), 155 deletions(-)
 create mode 100644 prdoc/pr_6867.prdoc

diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
index 1086e3a52ec..862cf6af979 100644
--- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
@@ -26,7 +26,9 @@ use futures::{Stream, StreamExt};
 use polkadot_core_primitives::{Block, BlockNumber, Hash, Header};
 use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient};
 use polkadot_primitives::{
-	async_backing::AsyncBackingParams, slashing, vstaging::async_backing::BackingState,
+	async_backing::AsyncBackingParams,
+	slashing,
+	vstaging::async_backing::{BackingState, Constraints},
 	ApprovalVotingParams, CoreIndex, NodeFeatures,
 };
 use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError};
@@ -454,6 +456,14 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient {
 			.parachain_host_candidates_pending_availability(at, para_id)
 			.await?)
 	}
+
+	async fn backing_constraints(
+		&self,
+		at: Hash,
+		para_id: ParaId,
+	) -> Result<Option<Constraints>, ApiError> {
+		Ok(self.rpc_client.parachain_host_backing_constraints(at, para_id).await?)
+	}
 }
 
 #[async_trait::async_trait]
diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
index d7785d92c73..0467b7085ca 100644
--- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
+++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
@@ -35,8 +35,8 @@ use cumulus_primitives_core::{
 		async_backing::AsyncBackingParams,
 		slashing,
 		vstaging::{
-			async_backing::BackingState, CandidateEvent,
-			CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
+			async_backing::{BackingState, Constraints},
+			CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
 			ScrapedOnChainVotes,
 		},
 		ApprovalVotingParams, BlockNumber, CandidateCommitments, CandidateHash, CoreIndex,
@@ -720,6 +720,15 @@ impl RelayChainRpcClient {
 		.await
 	}
 
+	pub async fn parachain_host_backing_constraints(
+		&self,
+		at: RelayHash,
+		para_id: ParaId,
+	) -> Result<Option<Constraints>, RelayChainError> {
+		self.call_remote_runtime_function("ParachainHost_backing_constraints", at, Some(para_id))
+			.await
+	}
+
 	fn send_register_message_to_worker(
 		&self,
 		message: RpcDispatcherMessage,
diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs
index bd637a5f796..240c0931ae5 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs
@@ -25,7 +25,7 @@ use emulated_integration_tests_common::{
 
 // Rococo declaration
 decl_test_relay_chains! {
-	#[api_version(11)]
+	#[api_version(12)]
 	pub struct Rococo {
 		genesis = genesis::genesis(),
 		on_init = (),
diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs
index ce9fafcd5bd..729bb3ad63d 100644
--- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs
@@ -25,7 +25,7 @@ use emulated_integration_tests_common::{
 
 // Westend declaration
 decl_test_relay_chains! {
-	#[api_version(11)]
+	#[api_version(12)]
 	pub struct Westend {
 		genesis = genesis::genesis(),
 		on_init = (),
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
index ded0a3ab73b..72a76537160 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
@@ -132,8 +132,8 @@ use std::{
 use super::LOG_TARGET;
 use polkadot_node_subsystem::messages::Ancestors;
 use polkadot_node_subsystem_util::inclusion_emulator::{
-	self, ConstraintModifications, Constraints, Fragment, HypotheticalOrConcreteCandidate,
-	ProspectiveCandidate, RelayChainBlockInfo,
+	self, validate_commitments, ConstraintModifications, Constraints, Fragment,
+	HypotheticalOrConcreteCandidate, ProspectiveCandidate, RelayChainBlockInfo,
 };
 use polkadot_primitives::{
 	vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, BlockNumber,
@@ -1052,7 +1052,7 @@ impl FragmentChain {
 
 		// Try seeing if the parent candidate is in the current chain or if it is the latest
 		// included candidate. If so, get the constraints the candidate must satisfy.
-		let (constraints, maybe_min_relay_parent_number) =
+		let (is_unconnected, constraints, maybe_min_relay_parent_number) =
 			if let Some(parent_candidate) = self.best_chain.by_output_head.get(&parent_head_hash) {
 				let Some(parent_candidate) =
 					self.best_chain.chain.iter().find(|c| &c.candidate_hash == parent_candidate)
@@ -1062,6 +1062,7 @@ impl FragmentChain {
 				};
 
 				(
+					false,
 					self.scope
 						.base_constraints
 						.apply_modifications(&parent_candidate.cumulative_modifications)
@@ -1070,11 +1071,10 @@ impl FragmentChain {
 				)
 			} else if self.scope.base_constraints.required_parent.hash() == parent_head_hash {
 				// It builds on the latest included candidate.
-				(self.scope.base_constraints.clone(), None)
+				(false, self.scope.base_constraints.clone(), None)
 			} else {
-				// If the parent is not yet part of the chain, there's nothing else we can check for
-				// now.
-				return Ok(())
+				// The parent is not yet part of the chain
+				(true, self.scope.base_constraints.clone(), None)
 			};
 
 		// Check for cycles or invalid tree transitions.
@@ -1088,6 +1088,17 @@ impl FragmentChain {
 			candidate.persisted_validation_data(),
 			candidate.validation_code_hash(),
 		) {
+			if is_unconnected {
+				// If the parent is not yet part of the chain, we can check the commitments only
+				// if we have the full candidate.
+				return validate_commitments(
+					&self.scope.base_constraints,
+					&relay_parent,
+					commitments,
+					&validation_code_hash,
+				)
+				.map_err(Error::CheckAgainstConstraints)
+			}
 			Fragment::check_against_constraints(
 				&relay_parent,
 				&constraints,
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
index 624dd74132c..9e7e570bd16 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
@@ -34,6 +34,7 @@ fn make_constraints(
 		min_relay_parent_number,
 		max_pov_size: 1_000_000,
 		max_code_size: 1_000_000,
+		max_head_data_size: 20480,
 		ump_remaining: 10,
 		ump_remaining_bytes: 1_000,
 		max_ump_num_per_candidate: 10,
diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs
index 92aea8509f8..7416c97f3cd 100644
--- a/polkadot/node/core/prospective-parachains/src/lib.rs
+++ b/polkadot/node/core/prospective-parachains/src/lib.rs
@@ -45,15 +45,13 @@ use polkadot_node_subsystem::{
 use polkadot_node_subsystem_util::{
 	backing_implicit_view::{BlockInfoProspectiveParachains as BlockInfo, View as ImplicitView},
 	inclusion_emulator::{Constraints, RelayChainBlockInfo},
+	request_backing_constraints, request_candidates_pending_availability,
 	request_session_index_for_child,
 	runtime::{fetch_claim_queue, prospective_parachains_mode, ProspectiveParachainsMode},
 };
 use polkadot_primitives::{
-	vstaging::{
-		async_backing::CandidatePendingAvailability,
-		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
-	},
-	BlockNumber, CandidateHash, Hash, HeadData, Header, Id as ParaId, PersistedValidationData,
+	vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState},
+	BlockNumber, CandidateHash, Hash, Header, Id as ParaId, PersistedValidationData,
 };
 
 use crate::{
@@ -257,8 +255,9 @@ async fn handle_active_leaves_update<Context>(
 		let mut fragment_chains = HashMap::new();
 		for para in scheduled_paras {
 			// Find constraints and pending availability candidates.
-			let backing_state = fetch_backing_state(ctx, hash, para).await?;
-			let Some((constraints, pending_availability)) = backing_state else {
+			let Some((constraints, pending_availability)) =
+				fetch_backing_constraints_and_candidates(ctx, hash, para).await?
+			else {
 				// This indicates a runtime conflict of some kind.
 				gum::debug!(
 					target: LOG_TARGET,
@@ -273,7 +272,7 @@ async fn handle_active_leaves_update<Context>(
 			let pending_availability = preprocess_candidates_pending_availability(
 				ctx,
 				&mut temp_header_cache,
-				constraints.required_parent.clone(),
+				&constraints,
 				pending_availability,
 			)
 			.await?;
@@ -445,22 +444,23 @@ struct ImportablePendingAvailability {
 async fn preprocess_candidates_pending_availability<Context>(
 	ctx: &mut Context,
 	cache: &mut HashMap<Hash, Header>,
-	required_parent: HeadData,
-	pending_availability: Vec<CandidatePendingAvailability>,
+	constraints: &Constraints,
+	pending_availability: Vec<CommittedCandidateReceipt>,
 ) -> JfyiErrorResult<Vec<ImportablePendingAvailability>> {
-	let mut required_parent = required_parent;
+	let mut required_parent = constraints.required_parent.clone();
 
 	let mut importable = Vec::new();
 	let expected_count = pending_availability.len();
 
 	for (i, pending) in pending_availability.into_iter().enumerate() {
+		let candidate_hash = pending.hash();
 		let Some(relay_parent) =
 			fetch_block_info(ctx, cache, pending.descriptor.relay_parent()).await?
 		else {
 			let para_id = pending.descriptor.para_id();
 			gum::debug!(
 				target: LOG_TARGET,
-				?pending.candidate_hash,
+				?candidate_hash,
 				?para_id,
 				index = ?i,
 				?expected_count,
@@ -478,12 +478,12 @@ async fn preprocess_candidates_pending_availability<Context>(
 			},
 			persisted_validation_data: PersistedValidationData {
 				parent_head: required_parent,
-				max_pov_size: pending.max_pov_size,
+				max_pov_size: constraints.max_pov_size as _,
 				relay_parent_number: relay_parent.number,
 				relay_parent_storage_root: relay_parent.storage_root,
 			},
 			compact: fragment_chain::PendingAvailability {
-				candidate_hash: pending.candidate_hash,
+				candidate_hash,
 				relay_parent: relay_parent.into(),
 			},
 		});
@@ -883,7 +883,7 @@ async fn fetch_backing_state<Context>(
 	ctx: &mut Context,
 	relay_parent: Hash,
 	para_id: ParaId,
-) -> JfyiErrorResult<Option<(Constraints, Vec<CandidatePendingAvailability>)>> {
+) -> JfyiErrorResult<Option<(Constraints, Vec<CommittedCandidateReceipt>)>> {
 	let (tx, rx) = oneshot::channel();
 	ctx.send_message(RuntimeApiMessage::Request(
 		relay_parent,
@@ -891,10 +891,63 @@ async fn fetch_backing_state<Context>(
 	))
 	.await;
 
-	Ok(rx
+	Ok(rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??.map(|s| {
+		(
+			From::from(s.constraints),
+			s.pending_availability
+				.into_iter()
+				.map(|c| CommittedCandidateReceipt {
+					descriptor: c.descriptor,
+					commitments: c.commitments,
+				})
+				.collect(),
+		)
+	}))
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn fetch_backing_constraints_and_candidates<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+	para_id: ParaId,
+) -> JfyiErrorResult<Option<(Constraints, Vec<CommittedCandidateReceipt>)>> {
+	match fetch_backing_constraints_and_candidates_inner(ctx, relay_parent, para_id).await {
+		Err(error) => {
+			gum::debug!(
+				target: LOG_TARGET,
+				?para_id,
+				?relay_parent,
+				?error,
+				"Failed to get constraints and candidates pending availability."
+			);
+
+			// Fallback to backing state.
+			fetch_backing_state(ctx, relay_parent, para_id).await
+		},
+		Ok(maybe_constraints_and_candidates) => Ok(maybe_constraints_and_candidates),
+	}
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn fetch_backing_constraints_and_candidates_inner<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+	para_id: ParaId,
+) -> JfyiErrorResult<Option<(Constraints, Vec<CommittedCandidateReceipt>)>> {
+	let maybe_constraints = request_backing_constraints(relay_parent, para_id, ctx.sender())
+		.await
 		.await
-		.map_err(JfyiError::RuntimeApiRequestCanceled)??
-		.map(|s| (From::from(s.constraints), s.pending_availability)))
+		.map_err(JfyiError::RuntimeApiRequestCanceled)??;
+
+	let Some(constraints) = maybe_constraints else { return Ok(None) };
+
+	let pending_availability =
+		request_candidates_pending_availability(relay_parent, para_id, ctx.sender())
+			.await
+			.await
+			.map_err(JfyiError::RuntimeApiRequestCanceled)??;
+
+	Ok(Some((From::from(constraints), pending_availability)))
 }
 
 #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs
index 3f1eaa4e41e..5d1ef2f2f51 100644
--- a/polkadot/node/core/prospective-parachains/src/tests.rs
+++ b/polkadot/node/core/prospective-parachains/src/tests.rs
@@ -27,8 +27,8 @@ use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_primitives::{
 	async_backing::{AsyncBackingParams, Constraints, InboundHrmpLimitations},
 	vstaging::{
-		async_backing::BackingState, CommittedCandidateReceiptV2 as CommittedCandidateReceipt,
-		MutateDescriptorV2,
+		async_backing::{BackingState, CandidatePendingAvailability, Constraints as ConstraintsV2},
+		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2,
 	},
 	CoreIndex, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash,
 };
@@ -44,7 +44,7 @@ const ALLOWED_ANCESTRY_LEN: u32 = 3;
 const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
 	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: ALLOWED_ANCESTRY_LEN };
 
-const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError =
+const RUNTIME_API_NOT_SUPPORTED: RuntimeApiError =
 	RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" };
 
 const MAX_POV_SIZE: u32 = 1_000_000;
@@ -76,6 +76,31 @@ fn dummy_constraints(
 	}
 }
 
+fn dummy_constraints_v2(
+	min_relay_parent_number: BlockNumber,
+	valid_watermarks: Vec<BlockNumber>,
+	required_parent: HeadData,
+	validation_code_hash: ValidationCodeHash,
+) -> ConstraintsV2 {
+	ConstraintsV2 {
+		min_relay_parent_number,
+		max_pov_size: MAX_POV_SIZE,
+		max_head_data_size: 20480,
+		max_code_size: 1_000_000,
+		ump_remaining: 10,
+		ump_remaining_bytes: 1_000,
+		max_ump_num_per_candidate: 10,
+		dmp_remaining_messages: vec![],
+		hrmp_inbound: InboundHrmpLimitations { valid_watermarks },
+		hrmp_channels_out: vec![],
+		max_hrmp_num_per_candidate: 0,
+		required_parent,
+		validation_code_hash,
+		upgrade_restriction: None,
+		future_validation_code: None,
+	}
+}
+
 struct TestState {
 	claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>,
 	runtime_api_version: u32,
@@ -364,47 +389,93 @@ async fn handle_leaf_activation(
 
 	let paras: HashSet<_> = test_state.claim_queue.values().flatten().collect();
 
-	for _ in 0..paras.len() {
+	// We expect two runtime API requests per parachain (constraints + pending availability).
+	for _ in 0..paras.len() * 2 {
 		let message = virtual_overseer.recv().await;
-		// Get the para we are working with since the order is not deterministic.
-		let para_id = match &message {
+		let para_id = match message {
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				parent,
+				RuntimeApiRequest::ParaBackingState(p_id, tx),
+			)) if parent == *hash => {
+				let PerParaData { min_relay_parent, head_data, pending_availability } =
+					leaf.para_data(p_id);
+
+				let constraints = dummy_constraints(
+					*min_relay_parent,
+					vec![*number],
+					head_data.clone(),
+					test_state.validation_code_hash,
+				);
+
+				tx.send(Ok(Some(BackingState {
+					constraints,
+					pending_availability: pending_availability.clone(),
+				})))
+				.unwrap();
+				Some(p_id)
+			},
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				parent,
+				RuntimeApiRequest::BackingConstraints(p_id, tx),
+			)) if parent == *hash &&
+				test_state.runtime_api_version >=
+					RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT =>
+			{
+				let PerParaData { min_relay_parent, head_data, pending_availability: _ } =
+					leaf.para_data(p_id);
+				let constraints = dummy_constraints_v2(
+					*min_relay_parent,
+					vec![*number],
+					head_data.clone(),
+					test_state.validation_code_hash,
+				);
+
+				tx.send(Ok(Some(constraints))).unwrap();
+				None
+			},
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				parent,
+				RuntimeApiRequest::BackingConstraints(_p_id, tx),
+			)) if parent == *hash &&
+				test_state.runtime_api_version <
+					RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT =>
+			{
+				tx.send(Err(RUNTIME_API_NOT_SUPPORTED)).unwrap();
+				None
+			},
+
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-				_,
-				RuntimeApiRequest::ParaBackingState(p_id, _),
-			)) => *p_id,
+				parent,
+				RuntimeApiRequest::CandidatesPendingAvailability(p_id, tx),
+			)) if parent == *hash => {
+				tx.send(Ok(leaf
+					.para_data(p_id)
+					.pending_availability
+					.clone()
+					.into_iter()
+					.map(|c| CommittedCandidateReceipt {
+						descriptor: c.descriptor,
+						commitments: c.commitments,
+					})
+					.collect()))
+					.unwrap();
+				Some(p_id)
+			},
 			_ => panic!("received unexpected message {:?}", message),
 		};
 
-		let PerParaData { min_relay_parent, head_data, pending_availability } =
-			leaf.para_data(para_id);
-		let constraints = dummy_constraints(
-			*min_relay_parent,
-			vec![*number],
-			head_data.clone(),
-			test_state.validation_code_hash,
-		);
-		let backing_state =
-			BackingState { constraints, pending_availability: pending_availability.clone() };
-
-		assert_matches!(
-			message,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::ParaBackingState(p_id, tx))
-			) if parent == *hash && p_id == para_id => {
-				tx.send(Ok(Some(backing_state))).unwrap();
-			}
-		);
-
-		for pending in pending_availability {
-			if !used_relay_parents.contains(&pending.descriptor.relay_parent()) {
-				send_block_header(
-					virtual_overseer,
-					pending.descriptor.relay_parent(),
-					pending.relay_parent_number,
-				)
-				.await;
-
-				used_relay_parents.insert(pending.descriptor.relay_parent());
+		if let Some(para_id) = para_id {
+			for pending in leaf.para_data(para_id).pending_availability.clone() {
+				if !used_relay_parents.contains(&pending.descriptor.relay_parent()) {
+					send_block_header(
+						virtual_overseer,
+						pending.descriptor.relay_parent(),
+						pending.relay_parent_number,
+					)
+					.await;
+
+					used_relay_parents.insert(pending.descriptor.relay_parent());
+				}
 			}
 		}
 	}
@@ -416,7 +487,9 @@ async fn handle_leaf_activation(
 			msg: ProspectiveParachainsMessage::GetMinimumRelayParents(*hash, tx),
 		})
 		.await;
+
 	let mut resp = rx.await.unwrap();
+
 	resp.sort();
 	let mrp_response: Vec<(ParaId, BlockNumber)> = para_data
 		.iter()
@@ -597,7 +670,7 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() {
 			AllMessages::RuntimeApi(
 				RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
 			) if parent == hash => {
-				tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap();
+				tx.send(Err(RUNTIME_API_NOT_SUPPORTED)).unwrap();
 			}
 		);
 	}
@@ -616,9 +689,12 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() {
 // - One for leaf B on parachain 1
 // - One for leaf C on parachain 2
 // Also tests a claim queue size larger than 1.
-#[test]
-fn introduce_candidates_basic() {
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn introduce_candidates_basic(#[case] runtime_api_version: u32) {
 	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
 
 	let chain_a = ParaId::from(1);
 	let chain_b = ParaId::from(2);
@@ -786,9 +862,129 @@ fn introduce_candidates_basic() {
 	assert_eq!(view.active_leaves.len(), 3);
 }
 
-#[test]
-fn introduce_candidate_multiple_times() {
-	let test_state = TestState::default();
+// Check if candidates are not backed if they fail constraint checks
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn introduce_candidates_error(#[case] runtime_api_version: u32) {
+	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
+
+	let view = test_harness(|mut virtual_overseer| async move {
+		// Leaf A
+		let leaf_a = TestLeaf {
+			number: 100,
+			hash: Default::default(),
+			para_data: vec![
+				(1.into(), PerParaData::new(98, HeadData(vec![1, 2, 3]))),
+				(2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))),
+			],
+		};
+
+		// Activate leaves.
+		activate_leaf_with_params(
+			&mut virtual_overseer,
+			&leaf_a,
+			&test_state,
+			AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 },
+		)
+		.await;
+
+		// Candidate A.
+		let (candidate_a, pvd_a) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1, 2, 3]),
+			HeadData(vec![1]),
+			test_state.validation_code_hash,
+		);
+
+		// Candidate B.
+		let (candidate_b, pvd_b) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1]),
+			HeadData(vec![1; 20480]),
+			test_state.validation_code_hash,
+		);
+
+		// Candidate C commits to oversized head data.
+		let (candidate_c, pvd_c) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![1; 20480]),
+			HeadData(vec![0; 20485]),
+			test_state.validation_code_hash,
+		);
+
+		// Get hypothetical membership of candidates before adding candidate A.
+		// Candidate A can be added directly, candidates B and C are potential candidates.
+		for (candidate, pvd) in
+			[(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())]
+		{
+			get_hypothetical_membership(
+				&mut virtual_overseer,
+				candidate.hash(),
+				candidate,
+				pvd,
+				vec![leaf_a.hash],
+			)
+			.await;
+		}
+
+		// Fails constraints check
+		get_hypothetical_membership(
+			&mut virtual_overseer,
+			candidate_c.hash(),
+			candidate_c.clone(),
+			pvd_c.clone(),
+			Vec::new(),
+		)
+		.await;
+
+		// Add candidates
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone())
+			.await;
+		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone())
+			.await;
+		// Fails constraints check
+		introduce_seconded_candidate_failed(
+			&mut virtual_overseer,
+			candidate_c.clone(),
+			pvd_c.clone(),
+		)
+		.await;
+
+		back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await;
+		back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await;
+		// This one will not be backed.
+		back_candidate(&mut virtual_overseer, &candidate_c, candidate_c.hash()).await;
+
+		// Expect only A and B to be backable
+		get_backable_candidates(
+			&mut virtual_overseer,
+			&leaf_a,
+			1.into(),
+			Ancestors::default(),
+			5,
+			vec![(candidate_a.hash(), leaf_a.hash), (candidate_b.hash(), leaf_a.hash)],
+		)
+		.await;
+		virtual_overseer
+	});
+
+	assert_eq!(view.active_leaves.len(), 1);
+}
+
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn introduce_candidate_multiple_times(#[case] runtime_api_version: u32) {
+	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
 		let leaf_a = TestLeaf {
@@ -1172,9 +1368,12 @@ fn introduce_candidate_parent_leaving_view() {
 }
 
 // Introduce a candidate to multiple forks, see how the membership is returned.
-#[test]
-fn introduce_candidate_on_multiple_forks() {
-	let test_state = TestState::default();
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn introduce_candidate_on_multiple_forks(#[case] runtime_api_version: u32) {
+	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf B
 		let leaf_b = TestLeaf {
@@ -1241,11 +1440,14 @@ fn introduce_candidate_on_multiple_forks() {
 	assert_eq!(view.active_leaves.len(), 2);
 }
 
-#[test]
-fn unconnected_candidates_become_connected() {
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn unconnected_candidates_become_connected(#[case] runtime_api_version: u32) {
 	// This doesn't test all the complicated cases with many unconnected candidates, as it's more
 	// extensively tested in the `fragment_chain::tests` module.
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
 		let leaf_a = TestLeaf {
@@ -1483,9 +1685,14 @@ fn check_backable_query_single_candidate() {
 }
 
 // Backs some candidates and tests `GetBackableCandidates` when requesting a multiple candidates.
-#[test]
-fn check_backable_query_multiple_candidates() {
-	let test_state = TestState::default();
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn check_backable_query_multiple_candidates(#[case] runtime_api_version: u32) {
+	// Parametrised over the runtime API version to cover both the v2 constraints
+	// API and the legacy `ParaBackingState` fallback.
+	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
 		let leaf_a = TestLeaf {
@@ -1755,9 +1962,13 @@ fn check_backable_query_multiple_candidates() {
 }
 
 // Test hypothetical membership query.
-#[test]
-fn check_hypothetical_membership_query() {
-	let test_state = TestState::default();
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn check_hypothetical_membership_query(#[case] runtime_api_version: u32) {
+	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
+
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf B
 		let leaf_b = TestLeaf {
@@ -1894,6 +2105,17 @@ fn check_hypothetical_membership_query() {
 		);
 		introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_d, pvd_d).await;
 
+		// Candidate E has invalid head data.
+		let (candidate_e, pvd_e) = make_candidate(
+			leaf_a.hash,
+			leaf_a.number,
+			1.into(),
+			HeadData(vec![2]),
+			HeadData(vec![0; 20481]),
+			test_state.validation_code_hash,
+		);
+		introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_e, pvd_e).await;
+
 		// Add candidate B and back it.
 		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone())
 			.await;
@@ -1921,9 +2143,14 @@ fn check_hypothetical_membership_query() {
 	assert_eq!(view.active_leaves.len(), 2);
 }
 
-#[test]
-fn check_pvd_query() {
-	let test_state = TestState::default();
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn check_pvd_query(#[case] runtime_api_version: u32) {
+	// Parametrised over the runtime API version to cover both the v2 constraints
+	// API and the legacy `ParaBackingState` fallback.
+	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
 		let leaf_a = TestLeaf {
@@ -2061,6 +2288,7 @@ fn check_pvd_query() {
 // This test is parametrised with the runtime api version. For versions that don't support the claim
 // queue API, we check that av-cores are used.
 #[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
 #[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
 #[case(8)]
 fn correctly_updates_leaves(#[case] runtime_api_version: u32) {
@@ -2098,6 +2326,7 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) {
 
 		// Activate leaves.
 		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
+
 		activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await;
 
 		// Try activating a duplicate leaf.
@@ -2161,10 +2390,15 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) {
 	assert_eq!(view.active_leaves.len(), 0);
 }
 
-#[test]
-fn handle_active_leaves_update_gets_candidates_from_parent() {
-	let para_id = ParaId::from(1);
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn handle_active_leaves_update_gets_candidates_from_parent(#[case] runtime_api_version: u32) {
+	// Parametrised over the runtime API version to cover both the v2 constraints
+	// API and the legacy `ParaBackingState` fallback.
 	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
+	let para_id = ParaId::from(1);
 	test_state.claim_queue = test_state
 		.claim_queue
 		.into_iter()
@@ -2477,9 +2711,14 @@ fn handle_active_leaves_update_bounded_implicit_view() {
 	);
 }
 
-#[test]
-fn persists_pending_availability_candidate() {
+#[rstest]
+#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
+#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
+fn persists_pending_availability_candidate(#[case] runtime_api_version: u32) {
+	// Parametrised over the runtime API version to cover both the v2 constraints
+	// API and the legacy `ParaBackingState` fallback.
 	let mut test_state = TestState::default();
+	test_state.set_runtime_api_version(runtime_api_version);
 	let para_id = ParaId::from(1);
 	test_state.claim_queue = test_state
 		.claim_queue
diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs
index 7246010711e..8a885ea9cc9 100644
--- a/polkadot/node/core/runtime-api/src/cache.rs
+++ b/polkadot/node/core/runtime-api/src/cache.rs
@@ -20,10 +20,10 @@ use schnellru::{ByLength, LruMap};
 use sp_consensus_babe::Epoch;
 
 use polkadot_primitives::{
-	async_backing, slashing, vstaging,
+	async_backing, slashing,
 	vstaging::{
-		CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
-		ScrapedOnChainVotes,
+		self, async_backing::Constraints, CandidateEvent,
+		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes,
 	},
 	ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash,
 	CoreIndex, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId,
@@ -75,6 +75,7 @@ pub(crate) struct RequestResultCache {
 	node_features: LruMap<SessionIndex, NodeFeatures>,
 	approval_voting_params: LruMap<SessionIndex, ApprovalVotingParams>,
 	claim_queue: LruMap<Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>>,
+	backing_constraints: LruMap<(Hash, ParaId), Option<Constraints>>,
 }
 
 impl Default for RequestResultCache {
@@ -112,6 +113,7 @@ impl Default for RequestResultCache {
 			async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			node_features: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			claim_queue: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
+			backing_constraints: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 		}
 	}
 }
@@ -559,6 +561,21 @@ impl RequestResultCache {
 	) {
 		self.claim_queue.insert(relay_parent, value);
 	}
+
+	pub(crate) fn backing_constraints(
+		&mut self,
+		key: (Hash, ParaId),
+	) -> Option<&Option<Constraints>> {
+		self.backing_constraints.get(&key).map(|v| &*v)
+	}
+
+	pub(crate) fn cache_backing_constraints(
+		&mut self,
+		key: (Hash, ParaId),
+		value: Option<Constraints>,
+	) {
+		self.backing_constraints.insert(key, value);
+	}
 }
 
 pub(crate) enum RequestResult {
@@ -610,4 +627,5 @@ pub(crate) enum RequestResult {
 	NodeFeatures(SessionIndex, NodeFeatures),
 	ClaimQueue(Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>),
 	CandidatesPendingAvailability(Hash, ParaId, Vec<CommittedCandidateReceipt>),
+	BackingConstraints(Hash, ParaId, Option<Constraints>),
 }
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index c8b1d61e7be..4889822b46a 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -183,6 +183,9 @@ where
 			ClaimQueue(relay_parent, sender) => {
 				self.requests_cache.cache_claim_queue(relay_parent, sender);
 			},
+			BackingConstraints(relay_parent, para_id, constraints) => self
+				.requests_cache
+				.cache_backing_constraints((relay_parent, para_id), constraints),
 		}
 	}
 
@@ -340,6 +343,8 @@ where
 			},
 			Request::ClaimQueue(sender) =>
 				query!(claim_queue(), sender).map(|sender| Request::ClaimQueue(sender)),
+			Request::BackingConstraints(para, sender) => query!(backing_constraints(para), sender)
+				.map(|sender| Request::BackingConstraints(para, sender)),
 		}
 	}
 
@@ -652,5 +657,13 @@ where
 			ver = Request::CLAIM_QUEUE_RUNTIME_REQUIREMENT,
 			sender
 		),
+		Request::BackingConstraints(para, sender) => {
+			query!(
+				BackingConstraints,
+				backing_constraints(para),
+				ver = Request::CONSTRAINTS_RUNTIME_REQUIREMENT,
+				sender
+			)
+		},
 	}
 }
diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs
index d4fa0732388..56c60876957 100644
--- a/polkadot/node/core/runtime-api/src/tests.rs
+++ b/polkadot/node/core/runtime-api/src/tests.rs
@@ -22,8 +22,8 @@ use polkadot_node_subsystem_test_helpers::make_subsystem_context;
 use polkadot_primitives::{
 	async_backing, slashing, vstaging,
 	vstaging::{
-		CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
-		ScrapedOnChainVotes,
+		async_backing::Constraints, CandidateEvent,
+		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes,
 	},
 	ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash,
 	CoreIndex, DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId,
@@ -307,6 +307,14 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient {
 	) -> Result<BTreeMap<CoreIndex, VecDeque<ParaId>>, ApiError> {
 		todo!("Not required for tests")
 	}
+
+	async fn backing_constraints(
+		&self,
+		_at: Hash,
+		_para_id: ParaId,
+	) -> Result<Option<Constraints>, ApiError> {
+		todo!("Not required for tests")
+	}
 }
 
 #[test]
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index b541f951921..8a3b91b3ec7 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -42,9 +42,9 @@ use polkadot_node_primitives::{
 	ValidationResult,
 };
 use polkadot_primitives::{
-	async_backing, slashing, vstaging,
+	async_backing, slashing,
 	vstaging::{
-		BackedCandidate, CandidateReceiptV2 as CandidateReceipt,
+		self, async_backing::Constraints, BackedCandidate, CandidateReceiptV2 as CandidateReceipt,
 		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
 	},
 	ApprovalVotingParams, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateHash,
@@ -772,6 +772,9 @@ pub enum RuntimeApiRequest {
 	/// Get the candidates pending availability for a particular parachain
 	/// `V11`
 	CandidatesPendingAvailability(ParaId, RuntimeApiSender<Vec<CommittedCandidateReceipt>>),
+	/// Get the backing constraints for a particular parachain.
+	/// `V12`
+	BackingConstraints(ParaId, RuntimeApiSender<Option<Constraints>>),
 }
 
 impl RuntimeApiRequest {
@@ -812,6 +815,9 @@ impl RuntimeApiRequest {
 
 	/// `candidates_pending_availability`
 	pub const CANDIDATES_PENDING_AVAILABILITY_RUNTIME_REQUIREMENT: u32 = 11;
+
+	/// `backing_constraints`
+	pub const CONSTRAINTS_RUNTIME_REQUIREMENT: u32 = 12;
 }
 
 /// A message to the Runtime API subsystem.
diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs
index 4b96009f44b..018b52bedcd 100644
--- a/polkadot/node/subsystem-types/src/runtime_client.rs
+++ b/polkadot/node/subsystem-types/src/runtime_client.rs
@@ -18,10 +18,10 @@ use async_trait::async_trait;
 use polkadot_primitives::{
 	async_backing,
 	runtime_api::ParachainHost,
-	slashing, vstaging,
+	slashing,
 	vstaging::{
-		CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
-		ScrapedOnChainVotes,
+		self, async_backing::Constraints, CandidateEvent,
+		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes,
 	},
 	ApprovalVotingParams, Block, BlockNumber, CandidateCommitments, CandidateHash, CoreIndex,
 	DisputeState, ExecutorParams, GroupRotationInfo, Hash, Header, Id, InboundDownwardMessage,
@@ -347,6 +347,15 @@ pub trait RuntimeApiSubsystemClient {
 		at: Hash,
 		para_id: Id,
 	) -> Result<Vec<CommittedCandidateReceipt<Hash>>, ApiError>;
+
+	// == v12 ==
+	/// Get the constraints on the actions that can be taken by a new parachain
+	/// block.
+	async fn backing_constraints(
+		&self,
+		at: Hash,
+		para_id: Id,
+	) -> Result<Option<Constraints>, ApiError>;
 }
 
 /// Default implementation of [`RuntimeApiSubsystemClient`] using the client.
@@ -624,6 +633,14 @@ where
 	async fn claim_queue(&self, at: Hash) -> Result<BTreeMap<CoreIndex, VecDeque<Id>>, ApiError> {
 		self.client.runtime_api().claim_queue(at)
 	}
+
+	async fn backing_constraints(
+		&self,
+		at: Hash,
+		para_id: Id,
+	) -> Result<Option<Constraints>, ApiError> {
+		self.client.runtime_api().backing_constraints(at, para_id)
+	}
 }
 
 impl<Client, Block> HeaderBackend<Block> for DefaultSubsystemClient<Client>
diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
index 48d3f27b1fa..8a620db4ab0 100644
--- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
+++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
@@ -82,9 +82,10 @@
 /// in practice at most once every few weeks.
 use polkadot_node_subsystem::messages::HypotheticalCandidate;
 use polkadot_primitives::{
-	async_backing::Constraints as PrimitiveConstraints, vstaging::skip_ump_signals, BlockNumber,
-	CandidateCommitments, CandidateHash, Hash, HeadData, Id as ParaId, PersistedValidationData,
-	UpgradeRestriction, ValidationCodeHash,
+	async_backing::Constraints as OldPrimitiveConstraints,
+	vstaging::{async_backing::Constraints as PrimitiveConstraints, skip_ump_signals},
+	BlockNumber, CandidateCommitments, CandidateHash, Hash, HeadData, Id as ParaId,
+	PersistedValidationData, UpgradeRestriction, ValidationCodeHash,
 };
 use std::{collections::HashMap, sync::Arc};
 
@@ -115,6 +116,8 @@ pub struct Constraints {
 	pub max_pov_size: usize,
 	/// The maximum new validation code size allowed, in bytes.
 	pub max_code_size: usize,
+	/// The maximum head-data size, in bytes.
+	pub max_head_data_size: usize,
 	/// The amount of UMP messages remaining.
 	pub ump_remaining: usize,
 	/// The amount of UMP bytes remaining.
@@ -146,6 +149,44 @@ impl From<PrimitiveConstraints> for Constraints {
 			min_relay_parent_number: c.min_relay_parent_number,
 			max_pov_size: c.max_pov_size as _,
 			max_code_size: c.max_code_size as _,
+			max_head_data_size: c.max_head_data_size as _,
+			ump_remaining: c.ump_remaining as _,
+			ump_remaining_bytes: c.ump_remaining_bytes as _,
+			max_ump_num_per_candidate: c.max_ump_num_per_candidate as _,
+			dmp_remaining_messages: c.dmp_remaining_messages,
+			hrmp_inbound: InboundHrmpLimitations {
+				valid_watermarks: c.hrmp_inbound.valid_watermarks,
+			},
+			hrmp_channels_out: c
+				.hrmp_channels_out
+				.into_iter()
+				.map(|(para_id, limits)| {
+					(
+						para_id,
+						OutboundHrmpChannelLimitations {
+							bytes_remaining: limits.bytes_remaining as _,
+							messages_remaining: limits.messages_remaining as _,
+						},
+					)
+				})
+				.collect(),
+			max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _,
+			required_parent: c.required_parent,
+			validation_code_hash: c.validation_code_hash,
+			upgrade_restriction: c.upgrade_restriction,
+			future_validation_code: c.future_validation_code,
+		}
+	}
+}
+
+impl From<OldPrimitiveConstraints> for Constraints {
+	fn from(c: OldPrimitiveConstraints) -> Self {
+		Constraints {
+			min_relay_parent_number: c.min_relay_parent_number,
+			max_pov_size: c.max_pov_size as _,
+			max_code_size: c.max_code_size as _,
+			// Equal to Polkadot/Kusama config.
+			max_head_data_size: 20480,
 			ump_remaining: c.ump_remaining as _,
 			ump_remaining_bytes: c.ump_remaining_bytes as _,
 			max_ump_num_per_candidate: c.max_ump_num_per_candidate as _,
@@ -520,6 +561,10 @@ pub enum FragmentValidityError {
 	///
 	/// Max allowed, new.
 	CodeSizeTooLarge(usize, usize),
+	/// Head data size too big.
+	///
+	/// Max allowed, new.
+	HeadDataTooLarge(usize, usize),
 	/// Relay parent too old.
 	///
 	/// Min allowed, current.
@@ -686,28 +731,13 @@ impl Fragment {
 	}
 }
 
-fn validate_against_constraints(
+/// Validates if the candidate commitments are obeying the constraints.
+pub fn validate_commitments(
 	constraints: &Constraints,
 	relay_parent: &RelayChainBlockInfo,
 	commitments: &CandidateCommitments,
-	persisted_validation_data: &PersistedValidationData,
 	validation_code_hash: &ValidationCodeHash,
-	modifications: &ConstraintModifications,
 ) -> Result<(), FragmentValidityError> {
-	let expected_pvd = PersistedValidationData {
-		parent_head: constraints.required_parent.clone(),
-		relay_parent_number: relay_parent.number,
-		relay_parent_storage_root: relay_parent.storage_root,
-		max_pov_size: constraints.max_pov_size as u32,
-	};
-
-	if expected_pvd != *persisted_validation_data {
-		return Err(FragmentValidityError::PersistedValidationDataMismatch(
-			expected_pvd,
-			persisted_validation_data.clone(),
-		))
-	}
-
 	if constraints.validation_code_hash != *validation_code_hash {
 		return Err(FragmentValidityError::ValidationCodeMismatch(
 			constraints.validation_code_hash,
@@ -715,6 +745,13 @@ fn validate_against_constraints(
 		))
 	}
 
+	if commitments.head_data.0.len() > constraints.max_head_data_size {
+		return Err(FragmentValidityError::HeadDataTooLarge(
+			constraints.max_head_data_size,
+			commitments.head_data.0.len(),
+		))
+	}
+
 	if relay_parent.number < constraints.min_relay_parent_number {
 		return Err(FragmentValidityError::RelayParentTooOld(
 			constraints.min_relay_parent_number,
@@ -740,6 +777,39 @@ fn validate_against_constraints(
 		))
 	}
 
+	if commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate {
+		return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow {
+			messages_allowed: constraints.max_hrmp_num_per_candidate,
+			messages_submitted: commitments.horizontal_messages.len(),
+		})
+	}
+
+	Ok(())
+}
+
+fn validate_against_constraints(
+	constraints: &Constraints,
+	relay_parent: &RelayChainBlockInfo,
+	commitments: &CandidateCommitments,
+	persisted_validation_data: &PersistedValidationData,
+	validation_code_hash: &ValidationCodeHash,
+	modifications: &ConstraintModifications,
+) -> Result<(), FragmentValidityError> {
+	validate_commitments(constraints, relay_parent, commitments, validation_code_hash)?;
+
+	let expected_pvd = PersistedValidationData {
+		parent_head: constraints.required_parent.clone(),
+		relay_parent_number: relay_parent.number,
+		relay_parent_storage_root: relay_parent.storage_root,
+		max_pov_size: constraints.max_pov_size as u32,
+	};
+
+	if expected_pvd != *persisted_validation_data {
+		return Err(FragmentValidityError::PersistedValidationDataMismatch(
+			expected_pvd,
+			persisted_validation_data.clone(),
+		))
+	}
 	if modifications.dmp_messages_processed == 0 {
 		if constraints
 			.dmp_remaining_messages
@@ -750,20 +820,12 @@ fn validate_against_constraints(
 		}
 	}
 
-	if commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate {
-		return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow {
-			messages_allowed: constraints.max_hrmp_num_per_candidate,
-			messages_submitted: commitments.horizontal_messages.len(),
-		})
-	}
-
 	if modifications.ump_messages_sent > constraints.max_ump_num_per_candidate {
 		return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
 			messages_allowed: constraints.max_ump_num_per_candidate,
 			messages_submitted: commitments.upward_messages.len(),
 		})
 	}
-
 	constraints
 		.check_modifications(&modifications)
 		.map_err(FragmentValidityError::OutputsInvalid)
@@ -971,6 +1033,7 @@ mod tests {
 			validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(),
 			upgrade_restriction: None,
 			future_validation_code: None,
+			max_head_data_size: 1024,
 		}
 	}
 
@@ -1478,4 +1541,24 @@ mod tests {
 			Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)),
 		);
 	}
+
+	#[test]
+	fn head_data_size_too_large() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0xcc),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		let head_data_size = constraints.max_head_data_size;
+		candidate.commitments.head_data = vec![0; head_data_size + 1].into();
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())),
+			Err(FragmentValidityError::HeadDataTooLarge(head_data_size, head_data_size + 1)),
+		);
+	}
 }
diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs
index 3bed1855894..6b069ee8611 100644
--- a/polkadot/node/subsystem-util/src/lib.rs
+++ b/polkadot/node/subsystem-util/src/lib.rs
@@ -43,8 +43,9 @@ use futures::channel::{mpsc, oneshot};
 use polkadot_primitives::{
 	slashing,
 	vstaging::{
-		async_backing::BackingState, CandidateEvent,
-		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes,
+		async_backing::{BackingState, Constraints},
+		CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
+		ScrapedOnChainVotes,
 	},
 	AsyncBackingParams, AuthorityDiscoveryId, CandidateHash, CoreIndex, EncodeAs, ExecutorParams,
 	GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption,
@@ -313,6 +314,8 @@ specialize_requests! {
 	fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams;
 	fn request_claim_queue() -> BTreeMap<CoreIndex, VecDeque<ParaId>>; ClaimQueue;
 	fn request_para_backing_state(para_id: ParaId) -> Option<BackingState>; ParaBackingState;
+	fn request_backing_constraints(para_id: ParaId) -> Option<Constraints>; BackingConstraints;
+
 }
 
 /// Requests executor parameters from the runtime effective at given relay-parent. First obtains
diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs
index 3c90c050bae..df1dfbac400 100644
--- a/polkadot/primitives/src/runtime_api.rs
+++ b/polkadot/primitives/src/runtime_api.rs
@@ -116,8 +116,8 @@
 use crate::{
 	slashing,
 	vstaging::{
-		self, CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
-		ScrapedOnChainVotes,
+		self, async_backing::Constraints, CandidateEvent,
+		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes,
 	},
 	ApprovalVotingParams, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateHash,
 	CoreIndex, DisputeState, ExecutorParams, GroupRotationInfo, Hash, NodeFeatures,
@@ -297,5 +297,11 @@ sp_api::decl_runtime_apis! {
 		/// Elastic scaling support
 		#[api_version(11)]
 		fn candidates_pending_availability(para_id: ppp::Id) -> Vec<CommittedCandidateReceipt<Hash>>;
+
+		/***** Added in v12 *****/
+		/// Returns the constraints on the actions that can be taken by a new parachain
+		/// block.
+		#[api_version(12)]
+		fn backing_constraints(para_id: ppp::Id) -> Option<Constraints>;
 	}
 }
diff --git a/polkadot/primitives/src/vstaging/async_backing.rs b/polkadot/primitives/src/vstaging/async_backing.rs
index 8706214b5a0..ce995453805 100644
--- a/polkadot/primitives/src/vstaging/async_backing.rs
+++ b/polkadot/primitives/src/vstaging/async_backing.rs
@@ -50,12 +50,50 @@ impl<H: Copy> From<CandidatePendingAvailability<H>>
 	}
 }
 
+/// Constraints on the actions that can be taken by a new parachain
+/// block. These limitations are implicitly associated with some particular
+/// parachain, which should be apparent from usage.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct Constraints<N = BlockNumber> {
+	/// The minimum relay-parent number accepted under these constraints.
+	pub min_relay_parent_number: N,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: u32,
+	/// The maximum new validation code size allowed, in bytes.
+	pub max_code_size: u32,
+	/// The maximum head-data size, in bytes.
+	pub max_head_data_size: u32,
+	/// The amount of UMP messages remaining.
+	pub ump_remaining: u32,
+	/// The amount of UMP bytes remaining.
+	pub ump_remaining_bytes: u32,
+	/// The maximum number of UMP messages allowed per candidate.
+	pub max_ump_num_per_candidate: u32,
+	/// Remaining DMP queue. Only includes sent-at block numbers.
+	pub dmp_remaining_messages: Vec<N>,
+	/// The limitations of all registered inbound HRMP channels.
+	pub hrmp_inbound: InboundHrmpLimitations<N>,
+	/// The limitations of all registered outbound HRMP channels.
+	pub hrmp_channels_out: Vec<(Id, OutboundHrmpChannelLimitations)>,
+	/// The maximum number of HRMP messages allowed per candidate.
+	pub max_hrmp_num_per_candidate: u32,
+	/// The required parent head-data of the parachain.
+	pub required_parent: HeadData,
+	/// The expected validation-code-hash of this parachain.
+	pub validation_code_hash: ValidationCodeHash,
+	/// The code upgrade restriction signal as-of this parachain.
+	pub upgrade_restriction: Option<UpgradeRestriction>,
+	/// The future validation code hash, if any, and at what relay-parent
+	/// number the upgrade would be minimally applied.
+	pub future_validation_code: Option<(N, ValidationCodeHash)>,
+}
+
 /// The per-parachain state of the backing system, including
 /// state-machine constraints and candidates pending availability.
 #[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
 pub struct BackingState<H = Hash, N = BlockNumber> {
 	/// The state-machine constraints of the parachain.
-	pub constraints: Constraints<N>,
+	pub constraints: crate::async_backing::Constraints<N>,
 	/// The candidates pending availability. These should be ordered, i.e. they should form
 	/// a sub-chain, where the first candidate builds on top of the required parent of the
 	/// constraints and each subsequent builds on top of the previous head-data.
diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs
index c52f3539c3e..5da4595af65 100644
--- a/polkadot/primitives/src/vstaging/mod.rs
+++ b/polkadot/primitives/src/vstaging/mod.rs
@@ -19,10 +19,11 @@ use crate::{ValidatorIndex, ValidityAttestation};
 
 // Put any primitives used by staging APIs functions here
 use super::{
-	async_backing::Constraints, BlakeTwo256, BlockNumber, CandidateCommitments,
-	CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CoreIndex, GroupIndex, Hash,
-	HashT, HeadData, Header, Id, Id as ParaId, MultiDisputeStatementSet, ScheduledCore,
-	UncheckedSignedAvailabilityBitfields, ValidationCodeHash,
+	async_backing::{InboundHrmpLimitations, OutboundHrmpChannelLimitations},
+	BlakeTwo256, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateHash, CollatorId,
+	CollatorSignature, CoreIndex, GroupIndex, Hash, HashT, HeadData, Header, Id, Id as ParaId,
+	MultiDisputeStatementSet, ScheduledCore, UncheckedSignedAvailabilityBitfields,
+	UpgradeRestriction, ValidationCodeHash,
 };
 use alloc::{
 	collections::{BTreeMap, BTreeSet, VecDeque},
diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
index 61278621cf5..0f210a07864 100644
--- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
+++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
@@ -126,6 +126,9 @@ prospective validation data. This is unlikely to change.
 - `RuntimeApiRequest::ParaBackingState`
   - Gets the backing state of the given para (the constraints of the para and
     candidates pending availability).
+- `RuntimeApiRequest::BackingConstraints`
+  - Gets the constraints on the actions that can be taken by a new parachain
+    block.
 - `RuntimeApiRequest::AvailabilityCores`
   - Gets information on all availability cores.
 - `ChainApiMessage::Ancestors`
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
index e9327bc7641..3f2cb577109 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
@@ -401,10 +401,10 @@ pub fn minimum_backing_votes<T: initializer::Config>() -> u32 {
 	configuration::ActiveConfig::<T>::get().minimum_backing_votes
 }
 
-/// Implementation for `ParaBackingState` function from the runtime API
-pub fn backing_state<T: initializer::Config>(
+// Helper function that returns the backing constraints given a parachain id.
+pub(crate) fn backing_constraints<T: initializer::Config>(
 	para_id: ParaId,
-) -> Option<BackingState<T::Hash, BlockNumberFor<T>>> {
+) -> Option<Constraints<BlockNumberFor<T>>> {
 	let config = configuration::ActiveConfig::<T>::get();
 	// Async backing is only expected to be enabled with a tracker capacity of 1.
 	// Subsequent configuration update gets applied on new session, which always
@@ -458,7 +458,7 @@ pub fn backing_state<T: initializer::Config>(
 		})
 		.collect();
 
-	let constraints = Constraints {
+	Some(Constraints {
 		min_relay_parent_number,
 		max_pov_size: config.max_pov_size,
 		max_code_size: config.max_code_size,
@@ -473,7 +473,16 @@ pub fn backing_state<T: initializer::Config>(
 		validation_code_hash,
 		upgrade_restriction,
 		future_validation_code,
-	};
+	})
+}
+
+/// Implementation for `ParaBackingState` function from the runtime API
+#[deprecated(note = "`backing_state` will be removed. Use `backing_constraints` and
+	`candidates_pending_availability` instead.")]
+pub fn backing_state<T: initializer::Config>(
+	para_id: ParaId,
+) -> Option<BackingState<T::Hash, BlockNumberFor<T>>> {
+	let constraints = backing_constraints::<T>(para_id)?;
 
 	let pending_availability = {
 		crate::inclusion::PendingAvailability::<T>::get(&para_id)
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
index d01b543630c..52a9a9e1228 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -15,3 +15,33 @@
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Put implementations of functions from staging APIs here.
+
+use crate::{configuration, initializer};
+use frame_system::pallet_prelude::*;
+use polkadot_primitives::{vstaging::async_backing::Constraints, Id as ParaId};
+
+/// Implementation for `constraints` function from the runtime API
+pub fn backing_constraints<T: initializer::Config>(
+	para_id: ParaId,
+) -> Option<Constraints<BlockNumberFor<T>>> {
+	let config = configuration::ActiveConfig::<T>::get();
+	let constraints_v11 = super::v11::backing_constraints::<T>(para_id)?;
+
+	Some(Constraints {
+		min_relay_parent_number: constraints_v11.min_relay_parent_number,
+		max_pov_size: constraints_v11.max_pov_size,
+		max_code_size: constraints_v11.max_code_size,
+		max_head_data_size: config.max_head_data_size,
+		ump_remaining: constraints_v11.ump_remaining,
+		ump_remaining_bytes: constraints_v11.ump_remaining_bytes,
+		max_ump_num_per_candidate: constraints_v11.max_ump_num_per_candidate,
+		dmp_remaining_messages: constraints_v11.dmp_remaining_messages,
+		hrmp_inbound: constraints_v11.hrmp_inbound,
+		hrmp_channels_out: constraints_v11.hrmp_channels_out,
+		max_hrmp_num_per_candidate: constraints_v11.max_hrmp_num_per_candidate,
+		required_parent: constraints_v11.required_parent,
+		validation_code_hash: constraints_v11.validation_code_hash,
+		upgrade_restriction: constraints_v11.upgrade_restriction,
+		future_validation_code: constraints_v11.future_validation_code,
+	})
+}
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index c2c3d35ee5b..f165091beda 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -49,8 +49,8 @@ use pallet_nis::WithMaximumOf;
 use polkadot_primitives::{
 	slashing,
 	vstaging::{
-		CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
-		ScrapedOnChainVotes,
+		async_backing::Constraints, CandidateEvent,
+		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes,
 	},
 	AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateHash, CoreIndex,
 	DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage,
@@ -78,7 +78,9 @@ use polkadot_runtime_parachains::{
 	initializer as parachains_initializer, on_demand as parachains_on_demand,
 	origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent,
-	runtime_api_impl::v11 as parachains_runtime_api_impl,
+	runtime_api_impl::{
+		v11 as parachains_runtime_api_impl, vstaging as parachains_runtime_vstaging_api_impl,
+	},
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
 };
@@ -1984,7 +1986,7 @@ sp_api::impl_runtime_apis! {
 		}
 	}
 
-	#[api_version(11)]
+	#[api_version(12)]
 	impl polkadot_primitives::runtime_api::ParachainHost<Block> for Runtime {
 		fn validators() -> Vec<ValidatorId> {
 			parachains_runtime_api_impl::validators::<Runtime>()
@@ -2122,6 +2124,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn para_backing_state(para_id: ParaId) -> Option<polkadot_primitives::vstaging::async_backing::BackingState> {
+			#[allow(deprecated)]
 			parachains_runtime_api_impl::backing_state::<Runtime>(para_id)
 		}
 
@@ -2148,6 +2151,10 @@ sp_api::impl_runtime_apis! {
 		fn candidates_pending_availability(para_id: ParaId) -> Vec<CommittedCandidateReceipt<Hash>> {
 			parachains_runtime_api_impl::candidates_pending_availability::<Runtime>(para_id)
 		}
+
+		fn backing_constraints(para_id: ParaId) -> Option<Constraints> {
+			parachains_runtime_vstaging_api_impl::backing_constraints::<Runtime>(para_id)
+		}
 	}
 
 	#[api_version(5)]
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index cdf6fa92da2..4126193388c 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -1067,6 +1067,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn para_backing_state(para_id: ParaId) -> Option<polkadot_primitives::vstaging::async_backing::BackingState> {
+			#[allow(deprecated)]
 			runtime_impl::backing_state::<Runtime>(para_id)
 		}
 
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index a9ba0778fe0..935b62c2338 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -52,8 +52,8 @@ use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInf
 use polkadot_primitives::{
 	slashing,
 	vstaging::{
-		CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
-		ScrapedOnChainVotes,
+		async_backing::Constraints, CandidateEvent,
+		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes,
 	},
 	AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateHash, CoreIndex,
 	DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage,
@@ -84,7 +84,9 @@ use polkadot_runtime_parachains::{
 	initializer as parachains_initializer, on_demand as parachains_on_demand,
 	origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points,
-	runtime_api_impl::v11 as parachains_runtime_api_impl,
+	runtime_api_impl::{
+		v11 as parachains_runtime_api_impl, vstaging as parachains_runtime_vstaging_api_impl,
+	},
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
 };
@@ -2010,7 +2012,7 @@ sp_api::impl_runtime_apis! {
 		}
 	}
 
-	#[api_version(11)]
+	#[api_version(12)]
 	impl polkadot_primitives::runtime_api::ParachainHost<Block> for Runtime {
 		fn validators() -> Vec<ValidatorId> {
 			parachains_runtime_api_impl::validators::<Runtime>()
@@ -2148,6 +2150,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn para_backing_state(para_id: ParaId) -> Option<polkadot_primitives::vstaging::async_backing::BackingState> {
+			#[allow(deprecated)]
 			parachains_runtime_api_impl::backing_state::<Runtime>(para_id)
 		}
 
@@ -2174,6 +2177,10 @@ sp_api::impl_runtime_apis! {
 		fn candidates_pending_availability(para_id: ParaId) -> Vec<CommittedCandidateReceipt<Hash>> {
 			parachains_runtime_api_impl::candidates_pending_availability::<Runtime>(para_id)
 		}
+
+		fn backing_constraints(para_id: ParaId) -> Option<Constraints> {
+			parachains_runtime_vstaging_api_impl::backing_constraints::<Runtime>(para_id)
+		}
 	}
 
 	#[api_version(5)]
diff --git a/prdoc/pr_6867.prdoc b/prdoc/pr_6867.prdoc
new file mode 100644
index 00000000000..afa35533d46
--- /dev/null
+++ b/prdoc/pr_6867.prdoc
@@ -0,0 +1,30 @@
+title: Deprecate ParaBackingState API
+doc:
+- audience: [ Runtime Dev, Node Dev ]
+  description: |-
+    Deprecates the `para_backing_state` API. Introduces a new `backing_constraints` API that can be used
+    together with existing `candidates_pending_availability` to retrieve the same information provided by 
+    `para_backing_state`.
+
+crates:
+- name: polkadot-primitives
+  bump: minor
+- name: polkadot-runtime-parachains
+  bump: minor
+- name: rococo-runtime
+  bump: minor
+- name: westend-runtime
+  bump: minor
+- name: cumulus-relay-chain-rpc-interface
+  bump: minor
+- name: polkadot-node-core-prospective-parachains
+  bump: patch
+- name: polkadot-node-core-runtime-api
+  bump: minor
+- name: polkadot-node-subsystem-types
+  bump: major
+- name: polkadot-node-subsystem-util
+  bump: major
+- name: cumulus-relay-chain-minimal-node
+  bump: minor
+
-- 
GitLab


From f845a9f42614120c98582f598d45d6d831455305 Mon Sep 17 00:00:00 2001
From: Maksym H <1177472+mordamax@users.noreply.github.com>
Date: Fri, 24 Jan 2025 09:36:16 +0000
Subject: [PATCH 108/116] bench all weekly - and fix for pallet_multisig lib
 (#6789)

Closes #6196
Closes #7204

Example of PR: https://github.com/paritytech/polkadot-sdk/pull/6816

Every Sunday at 01:00 AM UTC it will start benchmarking (with /cmd bench)
all runtimes and all pallets.
Then the total diff will be pushed to a branch and a PR opened. I assume
review-bot is going to assign the required reviewers per changed files.

I'm afraid every week will be too much to review & merge, but we can
adjust it later.

Bonus: a fix for the pallet_multisig lib and
substrate/.maintain/frame-weight-template.hbs, which previously didn't
allow the new weights to compile.

---------

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Co-authored-by: command-bot <>
Co-authored-by: cmd[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
---
 .github/workflows/bench-all-runtimes.yml      | 165 ++++++++++++++++
 .../src/weights/pallet_multisig.rs            | 109 +++++-----
 .../src/weights/pallet_multisig.rs            | 134 +++++++------
 .../src/weights/pallet_multisig.rs            | 134 +++++++------
 .../src/weights/pallet_multisig.rs            | 111 ++++++-----
 .../src/weights/pallet_multisig.rs            | 126 ++++++------
 .../src/weights/pallet_multisig.rs            |  99 +++++-----
 .../src/weights/pallet_multisig.rs            |  97 ++++-----
 .../src/weights/pallet_multisig.rs            | 150 +++++++-------
 .../src/weights/pallet_multisig.rs            | 150 +++++++-------
 .../rococo/src/weights/pallet_multisig.rs     |  89 ++++-----
 .../westend/src/weights/pallet_multisig.rs    | 125 ++++++------
 substrate/.maintain/frame-weight-template.hbs |   3 +-
 substrate/frame/multisig/src/benchmarking.rs  |  22 +--
 substrate/frame/multisig/src/weights.rs       | 186 +++++++++---------
 15 files changed, 949 insertions(+), 751 deletions(-)
 create mode 100644 .github/workflows/bench-all-runtimes.yml

diff --git a/.github/workflows/bench-all-runtimes.yml b/.github/workflows/bench-all-runtimes.yml
new file mode 100644
index 00000000000..a24a7095d98
--- /dev/null
+++ b/.github/workflows/bench-all-runtimes.yml
@@ -0,0 +1,165 @@
+name: Bench all runtimes
+
+on:
+  # schedule:
+    # - cron: '0 1 * * 0' # weekly on Sunday night 01:00 UTC
+  workflow_dispatch:
+  # pull_request:
+
+permissions: # allow the action to create a PR
+  contents: write
+  issues: write
+  pull-requests: write
+  actions: read
+
+jobs:
+  preflight:
+    uses: ./.github/workflows/reusable-preflight.yml
+
+  runtime-matrix:
+    runs-on: ubuntu-latest
+    needs: [preflight]
+    timeout-minutes: 30
+    outputs:
+      runtime: ${{ steps.runtime.outputs.runtime }}
+    container:
+      image: ${{ needs.preflight.outputs.IMAGE }}
+    name: Extract runtimes from matrix
+    steps:
+      - uses: actions/checkout@v4
+      - id: runtime
+        run: |
+          RUNTIMES=$(jq '[.[] | select(.package != null)]' .github/workflows/runtimes-matrix.json)
+
+          RUNTIMES=$(echo $RUNTIMES | jq -c .)
+          echo "runtime=$RUNTIMES"
+          echo "runtime=$RUNTIMES" >> $GITHUB_OUTPUT
+
+  run-frame-omni-bencher:
+    needs: [preflight, runtime-matrix]
+    runs-on: ${{ needs.preflight.outputs.RUNNER_WEIGHTS }}
+    # 24 hours per runtime. 
+    # Max it takes 14hr for westend to recalculate, but due to limited runners,
+    # sometimes it can take longer.
+    timeout-minutes: 1440
+    strategy:
+      fail-fast: false # keep running other workflows even if one fails, to see the logs of all possible failures
+      matrix:
+        runtime: ${{ fromJSON(needs.runtime-matrix.outputs.runtime) }}
+    container:
+      image: ${{ needs.preflight.outputs.IMAGE }}
+    env:
+      PACKAGE_NAME: ${{ matrix.runtime.package }}
+      FLAGS: ${{ matrix.runtime.bench_flags }}
+      RUST_LOG: "frame_omni_bencher=info,polkadot_sdk_frame=info"
+    steps:
+    
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          ref: master
+
+      - name: script
+        id: required
+        run: |
+          # Fixes "detected dubious ownership" error in the ci
+          git config --global --add safe.directory $GITHUB_WORKSPACE
+          git remote -v
+          python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt
+          python3 .github/scripts/cmd/cmd.py bench --runtime ${{ matrix.runtime.name }}
+          git add .
+          git status
+
+          if [ -f /tmp/cmd/command_output.log ]; then
+            CMD_OUTPUT=$(cat /tmp/cmd/command_output.log)
+            # export to summary to display in the PR
+            echo "$CMD_OUTPUT" >> $GITHUB_STEP_SUMMARY
+            # should be multiline, otherwise it captures the first line only
+            echo 'cmd_output<<EOF' >> $GITHUB_OUTPUT
+            echo "$CMD_OUTPUT" >> $GITHUB_OUTPUT
+            echo 'EOF' >> $GITHUB_OUTPUT
+          fi
+
+          # Create patch that includes both modifications and new files
+          git add -A
+          git diff --staged > diff-${{ matrix.runtime.name }}.patch -U0
+          git reset
+      
+      - name: Upload diff
+        uses: actions/upload-artifact@v4
+        with:
+          name: diff-${{ matrix.runtime.name }}
+          path: diff-${{ matrix.runtime.name }}.patch
+
+  apply-diff-commit:
+    runs-on: ubuntu-latest
+    needs: [run-frame-omni-bencher]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          ref: master
+
+      - name: Download all artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: patches
+      
+      - name: Install subweight
+        run: cargo install subweight
+
+      # needs to be able to trigger CI
+      - uses: actions/create-github-app-token@v1
+        id: generate_token
+        with:
+          app-id: ${{ secrets.CMD_BOT_APP_ID }}
+          private-key: ${{ secrets.CMD_BOT_APP_KEY }}
+
+      - name: Apply diff and create PR
+        env:
+          GH_TOKEN: ${{ steps.generate_token.outputs.token }}
+        run: |
+          DATE=$(date +'%Y-%m-%d-%s')
+          BRANCH="update-weights-weekly-$DATE"
+          
+          git config user.name "github-actions[bot]"
+          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+          
+          git switch -c "$BRANCH"
+          
+          for file in patches/diff-*/diff-*.patch; do
+            if [ -f "$file" ] && [ -s "$file" ]; then
+              echo "Applying $file"
+              git apply "$file" --unidiff-zero --allow-empty || echo "Failed to apply $file"
+            else
+              echo "Skipping empty or non-existent patch file: $file"
+            fi
+          done
+          rm -rf patches
+          
+          git add .
+          git commit -m "Update all weights weekly for $DATE"
+          git push --set-upstream origin "$BRANCH"
+          
+          PR_TITLE="Auto-update of all weights for $DATE"
+          gh pr create \
+            --title "$PR_TITLE" \
+            --head "$BRANCH" \
+            --base "master" \
+            --reviewer paritytech/ci \
+            --reviewer paritytech/release-engineering \
+            --draft \
+            --label "R0-silent" \
+            --body "$PR_TITLE"
+
+          subweight compare commits \
+            --path-pattern "./**/weights/**/*.rs,./**/weights.rs" \
+            --method asymptotic \
+            --format markdown \
+            --no-color \
+            --change added changed \
+            --ignore-errors \
+            --threshold 2 \
+            origin/master $BRANCH
\ No newline at end of file
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_multisig.rs
index cf9c523f657..1192478c90a 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_multisig.rs
@@ -16,28 +16,28 @@
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=asset-hub-rococo-dev
-// --wasm-execution=compiled
 // --pallet=pallet_multisig
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -55,11 +55,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 13_714_000 picoseconds.
-		Weight::from_parts(14_440_231, 0)
+		// Minimum execution time: 16_059_000 picoseconds.
+		Weight::from_parts(17_033_878, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 5
-			.saturating_add(Weight::from_parts(598, 0).saturating_mul(z.into()))
+			// Standard Error: 8
+			.saturating_add(Weight::from_parts(489, 0).saturating_mul(z.into()))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
@@ -67,15 +67,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `262 + s * (2 ±0)`
+		//  Measured:  `295 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 44_768_000 picoseconds.
-		Weight::from_parts(33_662_218, 0)
+		// Minimum execution time: 46_128_000 picoseconds.
+		Weight::from_parts(33_704_180, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_633
-			.saturating_add(Weight::from_parts(128_927, 0).saturating_mul(s.into()))
-			// Standard Error: 16
-			.saturating_add(Weight::from_parts(1_543, 0).saturating_mul(z.into()))
+			// Standard Error: 1_456
+			.saturating_add(Weight::from_parts(147_148, 0).saturating_mul(s.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(2_037, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -85,15 +85,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `282`
+		//  Measured:  `315`
 		//  Estimated: `6811`
-		// Minimum execution time: 29_745_000 picoseconds.
-		Weight::from_parts(20_559_891, 0)
+		// Minimum execution time: 32_218_000 picoseconds.
+		Weight::from_parts(21_320_145, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 914
-			.saturating_add(Weight::from_parts(103_601, 0).saturating_mul(s.into()))
-			// Standard Error: 8
-			.saturating_add(Weight::from_parts(1_504, 0).saturating_mul(z.into()))
+			// Standard Error: 1_922
+			.saturating_add(Weight::from_parts(131_349, 0).saturating_mul(s.into()))
+			// Standard Error: 18
+			.saturating_add(Weight::from_parts(1_829, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -105,60 +105,63 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `385 + s * (33 ±0)`
+		//  Measured:  `418 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 51_506_000 picoseconds.
-		Weight::from_parts(36_510_777, 0)
+		// Minimum execution time: 53_641_000 picoseconds.
+		Weight::from_parts(32_057_363, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 2_183
-			.saturating_add(Weight::from_parts(183_764, 0).saturating_mul(s.into()))
-			// Standard Error: 21
-			.saturating_add(Weight::from_parts(1_653, 0).saturating_mul(z.into()))
+			// Standard Error: 2_897
+			.saturating_add(Weight::from_parts(254_035, 0).saturating_mul(s.into()))
+			// Standard Error: 28
+			.saturating_add(Weight::from_parts(2_432, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `263 + s * (2 ±0)`
+		//  Measured:  `295 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 31_072_000 picoseconds.
-		Weight::from_parts(32_408_621, 0)
+		// Minimum execution time: 30_302_000 picoseconds.
+		Weight::from_parts(33_367_363, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 913
-			.saturating_add(Weight::from_parts(121_410, 0).saturating_mul(s.into()))
+			// Standard Error: 1_389
+			.saturating_add(Weight::from_parts(150_845, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `282`
+		//  Measured:  `315`
 		//  Estimated: `6811`
-		// Minimum execution time: 18_301_000 picoseconds.
-		Weight::from_parts(18_223_547, 0)
+		// Minimum execution time: 17_008_000 picoseconds.
+		Weight::from_parts(18_452_875, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 747
-			.saturating_add(Weight::from_parts(114_584, 0).saturating_mul(s.into()))
+			// Standard Error: 949
+			.saturating_add(Weight::from_parts(130_051, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `454 + s * (1 ±0)`
+		//  Measured:  `482 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 32_107_000 picoseconds.
-		Weight::from_parts(33_674_827, 0)
+		// Minimum execution time: 30_645_000 picoseconds.
+		Weight::from_parts(33_864_517, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_220
-			.saturating_add(Weight::from_parts(122_011, 0).saturating_mul(s.into()))
+			// Standard Error: 1_511
+			.saturating_add(Weight::from_parts(138_628, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_multisig.rs
index 27687e10751..737ee0f54df 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_multisig.rs
@@ -1,42 +1,43 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=asset-hub-westend-dev
-// --wasm-execution=compiled
 // --pallet=pallet_multisig
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -54,11 +55,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 14_098_000 picoseconds.
-		Weight::from_parts(14_915_657, 0)
+		// Minimum execution time: 16_032_000 picoseconds.
+		Weight::from_parts(16_636_014, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 6
-			.saturating_add(Weight::from_parts(454, 0).saturating_mul(z.into()))
+			// Standard Error: 11
+			.saturating_add(Weight::from_parts(632, 0).saturating_mul(z.into()))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
@@ -66,15 +67,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `262 + s * (2 ±0)`
+		//  Measured:  `295 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 44_573_000 picoseconds.
-		Weight::from_parts(32_633_219, 0)
+		// Minimum execution time: 47_519_000 picoseconds.
+		Weight::from_parts(33_881_382, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_256
-			.saturating_add(Weight::from_parts(131_767, 0).saturating_mul(s.into()))
-			// Standard Error: 12
-			.saturating_add(Weight::from_parts(1_512, 0).saturating_mul(z.into()))
+			// Standard Error: 1_770
+			.saturating_add(Weight::from_parts(159_560, 0).saturating_mul(s.into()))
+			// Standard Error: 17
+			.saturating_add(Weight::from_parts(2_031, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -84,15 +85,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `282`
+		//  Measured:  `315`
 		//  Estimated: `6811`
-		// Minimum execution time: 30_035_000 picoseconds.
-		Weight::from_parts(20_179_371, 0)
+		// Minimum execution time: 31_369_000 picoseconds.
+		Weight::from_parts(18_862_672, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 827
-			.saturating_add(Weight::from_parts(110_520, 0).saturating_mul(s.into()))
-			// Standard Error: 8
-			.saturating_add(Weight::from_parts(1_419, 0).saturating_mul(z.into()))
+			// Standard Error: 1_519
+			.saturating_add(Weight::from_parts(141_546, 0).saturating_mul(s.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(2_057, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -104,60 +105,63 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `385 + s * (33 ±0)`
+		//  Measured:  `418 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 50_444_000 picoseconds.
-		Weight::from_parts(36_060_265, 0)
+		// Minimum execution time: 55_421_000 picoseconds.
+		Weight::from_parts(33_628_199, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_604
-			.saturating_add(Weight::from_parts(187_796, 0).saturating_mul(s.into()))
-			// Standard Error: 15
-			.saturating_add(Weight::from_parts(1_506, 0).saturating_mul(z.into()))
+			// Standard Error: 2_430
+			.saturating_add(Weight::from_parts(247_959, 0).saturating_mul(s.into()))
+			// Standard Error: 23
+			.saturating_add(Weight::from_parts(2_339, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `263 + s * (2 ±0)`
+		//  Measured:  `295 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 30_298_000 picoseconds.
-		Weight::from_parts(31_284_628, 0)
+		// Minimum execution time: 30_380_000 picoseconds.
+		Weight::from_parts(32_147_463, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 924
-			.saturating_add(Weight::from_parts(132_724, 0).saturating_mul(s.into()))
+			// Standard Error: 1_530
+			.saturating_add(Weight::from_parts(156_234, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `282`
+		//  Measured:  `315`
 		//  Estimated: `6811`
-		// Minimum execution time: 17_486_000 picoseconds.
-		Weight::from_parts(18_518_530, 0)
+		// Minimum execution time: 17_016_000 picoseconds.
+		Weight::from_parts(17_777_791, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_274
-			.saturating_add(Weight::from_parts(103_767, 0).saturating_mul(s.into()))
+			// Standard Error: 1_216
+			.saturating_add(Weight::from_parts(137_967, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `454 + s * (1 ±0)`
+		//  Measured:  `482 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 31_236_000 picoseconds.
-		Weight::from_parts(32_663_816, 0)
+		// Minimum execution time: 31_594_000 picoseconds.
+		Weight::from_parts(31_850_574, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_445
-			.saturating_add(Weight::from_parts(131_060, 0).saturating_mul(s.into()))
+			// Standard Error: 2_031
+			.saturating_add(Weight::from_parts(159_513, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_multisig.rs
index 832380d3876..4ee6f672540 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_multisig.rs
@@ -1,42 +1,43 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=bridge-hub-rococo-dev
-// --wasm-execution=compiled
 // --pallet=pallet_multisig
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -54,11 +55,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 13_958_000 picoseconds.
-		Weight::from_parts(14_501_711, 0)
+		// Minimum execution time: 16_890_000 picoseconds.
+		Weight::from_parts(17_493_920, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(626, 0).saturating_mul(z.into()))
+			// Standard Error: 11
+			.saturating_add(Weight::from_parts(559, 0).saturating_mul(z.into()))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
@@ -66,15 +67,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `263 + s * (2 ±0)`
+		//  Measured:  `191 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 44_067_000 picoseconds.
-		Weight::from_parts(33_432_998, 0)
+		// Minimum execution time: 46_099_000 picoseconds.
+		Weight::from_parts(34_431_293, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_250
-			.saturating_add(Weight::from_parts(131_851, 0).saturating_mul(s.into()))
-			// Standard Error: 12
-			.saturating_add(Weight::from_parts(1_459, 0).saturating_mul(z.into()))
+			// Standard Error: 2_489
+			.saturating_add(Weight::from_parts(151_886, 0).saturating_mul(s.into()))
+			// Standard Error: 24
+			.saturating_add(Weight::from_parts(1_900, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -84,15 +85,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `282`
+		//  Measured:  `210`
 		//  Estimated: `6811`
-		// Minimum execution time: 29_373_000 picoseconds.
-		Weight::from_parts(19_409_201, 0)
+		// Minimum execution time: 31_133_000 picoseconds.
+		Weight::from_parts(19_877_758, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 725
-			.saturating_add(Weight::from_parts(110_824, 0).saturating_mul(s.into()))
-			// Standard Error: 7
-			.saturating_add(Weight::from_parts(1_502, 0).saturating_mul(z.into()))
+			// Standard Error: 1_220
+			.saturating_add(Weight::from_parts(132_155, 0).saturating_mul(s.into()))
+			// Standard Error: 11
+			.saturating_add(Weight::from_parts(1_916, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -104,60 +105,63 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `388 + s * (33 ±0)`
+		//  Measured:  `316 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 49_724_000 picoseconds.
-		Weight::from_parts(34_153_321, 0)
+		// Minimum execution time: 58_414_000 picoseconds.
+		Weight::from_parts(32_980_753, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_376
-			.saturating_add(Weight::from_parts(174_634, 0).saturating_mul(s.into()))
-			// Standard Error: 13
-			.saturating_add(Weight::from_parts(1_753, 0).saturating_mul(z.into()))
+			// Standard Error: 3_838
+			.saturating_add(Weight::from_parts(302_359, 0).saturating_mul(s.into()))
+			// Standard Error: 37
+			.saturating_add(Weight::from_parts(2_629, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `263 + s * (2 ±0)`
+		//  Measured:  `191 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 31_081_000 picoseconds.
-		Weight::from_parts(31_552_702, 0)
+		// Minimum execution time: 29_917_000 picoseconds.
+		Weight::from_parts(33_459_806, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_066
-			.saturating_add(Weight::from_parts(135_081, 0).saturating_mul(s.into()))
+			// Standard Error: 1_607
+			.saturating_add(Weight::from_parts(150_128, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `282`
+		//  Measured:  `210`
 		//  Estimated: `6811`
-		// Minimum execution time: 17_807_000 picoseconds.
-		Weight::from_parts(18_241_044, 0)
+		// Minimum execution time: 16_739_000 picoseconds.
+		Weight::from_parts(16_757_542, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 768
-			.saturating_add(Weight::from_parts(112_957, 0).saturating_mul(s.into()))
+			// Standard Error: 909
+			.saturating_add(Weight::from_parts(138_791, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `454 + s * (1 ±0)`
+		//  Measured:  `382 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 32_421_000 picoseconds.
-		Weight::from_parts(32_554_061, 0)
+		// Minimum execution time: 35_004_000 picoseconds.
+		Weight::from_parts(35_434_253, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_157
-			.saturating_add(Weight::from_parts(141_221, 0).saturating_mul(s.into()))
+			// Standard Error: 1_130
+			.saturating_add(Weight::from_parts(158_542, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_multisig.rs
index 91840ae0c6d..599bed182de 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_multisig.rs
@@ -16,28 +16,28 @@
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --chain=bridge-hub-rococo-dev
-// --wasm-execution=compiled
-// --pallet=pallet_multisig
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
 // --extrinsic=*
+// --chain=bridge-hub-westend-dev
+// --pallet=pallet_multisig
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -55,11 +55,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 13_958_000 picoseconds.
-		Weight::from_parts(14_501_711, 0)
+		// Minimum execution time: 16_960_000 picoseconds.
+		Weight::from_parts(17_458_038, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(626, 0).saturating_mul(z.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(745, 0).saturating_mul(z.into()))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
@@ -67,15 +67,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `263 + s * (2 ±0)`
+		//  Measured:  `296 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 44_067_000 picoseconds.
-		Weight::from_parts(33_432_998, 0)
+		// Minimum execution time: 49_023_000 picoseconds.
+		Weight::from_parts(36_653_713, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_250
-			.saturating_add(Weight::from_parts(131_851, 0).saturating_mul(s.into()))
-			// Standard Error: 12
-			.saturating_add(Weight::from_parts(1_459, 0).saturating_mul(z.into()))
+			// Standard Error: 1_966
+			.saturating_add(Weight::from_parts(144_768, 0).saturating_mul(s.into()))
+			// Standard Error: 19
+			.saturating_add(Weight::from_parts(1_983, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -85,15 +85,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `282`
+		//  Measured:  `315`
 		//  Estimated: `6811`
-		// Minimum execution time: 29_373_000 picoseconds.
-		Weight::from_parts(19_409_201, 0)
+		// Minimum execution time: 32_233_000 picoseconds.
+		Weight::from_parts(20_563_994, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 725
-			.saturating_add(Weight::from_parts(110_824, 0).saturating_mul(s.into()))
-			// Standard Error: 7
-			.saturating_add(Weight::from_parts(1_502, 0).saturating_mul(z.into()))
+			// Standard Error: 1_541
+			.saturating_add(Weight::from_parts(137_834, 0).saturating_mul(s.into()))
+			// Standard Error: 15
+			.saturating_add(Weight::from_parts(2_004, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -105,60 +105,63 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `388 + s * (33 ±0)`
+		//  Measured:  `421 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 49_724_000 picoseconds.
-		Weight::from_parts(34_153_321, 0)
+		// Minimum execution time: 57_893_000 picoseconds.
+		Weight::from_parts(32_138_684, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_376
-			.saturating_add(Weight::from_parts(174_634, 0).saturating_mul(s.into()))
-			// Standard Error: 13
-			.saturating_add(Weight::from_parts(1_753, 0).saturating_mul(z.into()))
+			// Standard Error: 3_096
+			.saturating_add(Weight::from_parts(324_931, 0).saturating_mul(s.into()))
+			// Standard Error: 30
+			.saturating_add(Weight::from_parts(2_617, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `263 + s * (2 ±0)`
+		//  Measured:  `296 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 31_081_000 picoseconds.
-		Weight::from_parts(31_552_702, 0)
+		// Minimum execution time: 31_313_000 picoseconds.
+		Weight::from_parts(33_535_933, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_066
-			.saturating_add(Weight::from_parts(135_081, 0).saturating_mul(s.into()))
+			// Standard Error: 1_649
+			.saturating_add(Weight::from_parts(153_756, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `282`
+		//  Measured:  `315`
 		//  Estimated: `6811`
-		// Minimum execution time: 17_807_000 picoseconds.
-		Weight::from_parts(18_241_044, 0)
+		// Minimum execution time: 17_860_000 picoseconds.
+		Weight::from_parts(18_559_535, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 768
-			.saturating_add(Weight::from_parts(112_957, 0).saturating_mul(s.into()))
+			// Standard Error: 1_036
+			.saturating_add(Weight::from_parts(135_049, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `454 + s * (1 ±0)`
+		//  Measured:  `487 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 32_421_000 picoseconds.
-		Weight::from_parts(32_554_061, 0)
+		// Minimum execution time: 32_340_000 picoseconds.
+		Weight::from_parts(33_519_124, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_157
-			.saturating_add(Weight::from_parts(141_221, 0).saturating_mul(s.into()))
+			// Standard Error: 1_932
+			.saturating_add(Weight::from_parts(193_896, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs
index a7827b72009..5c428bb5e5e 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs
@@ -1,42 +1,43 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --chain=collectives-polkadot-dev
-// --wasm-execution=compiled
-// --pallet=pallet_multisig
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
 // --extrinsic=*
+// --chain=collectives-westend-dev
+// --pallet=pallet_multisig
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -54,11 +55,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 13_288_000 picoseconds.
-		Weight::from_parts(14_235_741, 0)
+		// Minimum execution time: 16_309_000 picoseconds.
+		Weight::from_parts(17_281_100, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 5
-			.saturating_add(Weight::from_parts(500, 0).saturating_mul(z.into()))
+			// Standard Error: 10
+			.saturating_add(Weight::from_parts(549, 0).saturating_mul(z.into()))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
@@ -68,13 +69,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `328 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 44_865_000 picoseconds.
-		Weight::from_parts(33_468_056, 0)
+		// Minimum execution time: 48_617_000 picoseconds.
+		Weight::from_parts(35_426_484, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_513
-			.saturating_add(Weight::from_parts(130_544, 0).saturating_mul(s.into()))
-			// Standard Error: 14
-			.saturating_add(Weight::from_parts(1_422, 0).saturating_mul(z.into()))
+			// Standard Error: 1_941
+			.saturating_add(Weight::from_parts(164_183, 0).saturating_mul(s.into()))
+			// Standard Error: 19
+			.saturating_add(Weight::from_parts(1_898, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -86,13 +87,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `348`
 		//  Estimated: `6811`
-		// Minimum execution time: 29_284_000 picoseconds.
-		Weight::from_parts(18_708_967, 0)
+		// Minimum execution time: 32_600_000 picoseconds.
+		Weight::from_parts(18_613_047, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 916
-			.saturating_add(Weight::from_parts(119_202, 0).saturating_mul(s.into()))
-			// Standard Error: 8
-			.saturating_add(Weight::from_parts(1_447, 0).saturating_mul(z.into()))
+			// Standard Error: 1_498
+			.saturating_add(Weight::from_parts(147_489, 0).saturating_mul(s.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(2_094, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -106,28 +107,29 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `451 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 49_462_000 picoseconds.
-		Weight::from_parts(34_470_286, 0)
+		// Minimum execution time: 55_580_000 picoseconds.
+		Weight::from_parts(32_757_473, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_738
-			.saturating_add(Weight::from_parts(178_227, 0).saturating_mul(s.into()))
-			// Standard Error: 17
-			.saturating_add(Weight::from_parts(1_644, 0).saturating_mul(z.into()))
+			// Standard Error: 3_265
+			.saturating_add(Weight::from_parts(261_212, 0).saturating_mul(s.into()))
+			// Standard Error: 32
+			.saturating_add(Weight::from_parts(2_407, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `329 + s * (2 ±0)`
+		//  Measured:  `328 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 30_749_000 picoseconds.
-		Weight::from_parts(31_841_438, 0)
+		// Minimum execution time: 31_137_000 picoseconds.
+		Weight::from_parts(32_271_159, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_033
-			.saturating_add(Weight::from_parts(123_126, 0).saturating_mul(s.into()))
+			// Standard Error: 1_280
+			.saturating_add(Weight::from_parts(163_156, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -138,11 +140,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `348`
 		//  Estimated: `6811`
-		// Minimum execution time: 17_436_000 picoseconds.
-		Weight::from_parts(18_036_002, 0)
+		// Minimum execution time: 17_763_000 picoseconds.
+		Weight::from_parts(18_235_437, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 829
-			.saturating_add(Weight::from_parts(109_450, 0).saturating_mul(s.into()))
+			// Standard Error: 1_245
+			.saturating_add(Weight::from_parts(138_553, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -151,13 +153,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 	/// The range of component `s` is `[2, 100]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `520 + s * (1 ±0)`
+		//  Measured:  `515 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 31_532_000 picoseconds.
-		Weight::from_parts(32_818_015, 0)
+		// Minimum execution time: 32_152_000 picoseconds.
+		Weight::from_parts(34_248_643, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 977
-			.saturating_add(Weight::from_parts(123_121, 0).saturating_mul(s.into()))
+			// Standard Error: 1_943
+			.saturating_add(Weight::from_parts(153_258, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_multisig.rs
index 8e010d768f6..f3ab1b1cac8 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_multisig.rs
@@ -16,28 +16,28 @@
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2024-01-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=coretime-rococo-dev
-// --wasm-execution=compiled
 // --pallet=pallet_multisig
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./cumulus/file_header.txt
-// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -55,11 +55,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 12_905_000 picoseconds.
-		Weight::from_parts(13_544_225, 0)
+		// Minimum execution time: 16_150_000 picoseconds.
+		Weight::from_parts(17_417_293, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 2
-			.saturating_add(Weight::from_parts(596, 0).saturating_mul(z.into()))
+			// Standard Error: 10
+			.saturating_add(Weight::from_parts(488, 0).saturating_mul(z.into()))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
@@ -69,13 +69,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `262 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 38_729_000 picoseconds.
-		Weight::from_parts(27_942_442, 0)
+		// Minimum execution time: 47_027_000 picoseconds.
+		Weight::from_parts(33_446_171, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 648
-			.saturating_add(Weight::from_parts(120_340, 0).saturating_mul(s.into()))
-			// Standard Error: 6
-			.saturating_add(Weight::from_parts(1_578, 0).saturating_mul(z.into()))
+			// Standard Error: 1_434
+			.saturating_add(Weight::from_parts(152_452, 0).saturating_mul(s.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(2_012, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -87,13 +87,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `282`
 		//  Estimated: `6811`
-		// Minimum execution time: 25_936_000 picoseconds.
-		Weight::from_parts(16_537_903, 0)
+		// Minimum execution time: 32_131_000 picoseconds.
+		Weight::from_parts(18_539_623, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 412
-			.saturating_add(Weight::from_parts(105_835, 0).saturating_mul(s.into()))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(1_534, 0).saturating_mul(z.into()))
+			// Standard Error: 1_460
+			.saturating_add(Weight::from_parts(140_999, 0).saturating_mul(s.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(2_033, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -107,58 +107,61 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `385 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 45_291_000 picoseconds.
-		Weight::from_parts(31_294_385, 0)
+		// Minimum execution time: 53_701_000 picoseconds.
+		Weight::from_parts(32_431_551, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 816
-			.saturating_add(Weight::from_parts(152_838, 0).saturating_mul(s.into()))
-			// Standard Error: 8
-			.saturating_add(Weight::from_parts(1_638, 0).saturating_mul(z.into()))
+			// Standard Error: 2_797
+			.saturating_add(Weight::from_parts(255_676, 0).saturating_mul(s.into()))
+			// Standard Error: 27
+			.saturating_add(Weight::from_parts(2_261, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `263 + s * (2 ±0)`
+		//  Measured:  `262 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 26_585_000 picoseconds.
-		Weight::from_parts(27_424_168, 0)
+		// Minimum execution time: 30_011_000 picoseconds.
+		Weight::from_parts(32_146_378, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 732
-			.saturating_add(Weight::from_parts(123_460, 0).saturating_mul(s.into()))
+			// Standard Error: 1_455
+			.saturating_add(Weight::from_parts(160_784, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `282`
 		//  Estimated: `6811`
-		// Minimum execution time: 15_228_000 picoseconds.
-		Weight::from_parts(15_568_631, 0)
+		// Minimum execution time: 16_968_000 picoseconds.
+		Weight::from_parts(16_851_993, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 441
-			.saturating_add(Weight::from_parts(107_463, 0).saturating_mul(s.into()))
+			// Standard Error: 793
+			.saturating_add(Weight::from_parts(142_320, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `454 + s * (1 ±0)`
+		//  Measured:  `449 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 28_033_000 picoseconds.
-		Weight::from_parts(29_228_827, 0)
+		// Minimum execution time: 31_706_000 picoseconds.
+		Weight::from_parts(33_679_423, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 748
-			.saturating_add(Weight::from_parts(117_495, 0).saturating_mul(s.into()))
+			// Standard Error: 1_154
+			.saturating_add(Weight::from_parts(145_059, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_multisig.rs
index 1aaf3f4a6fb..044356f1e14 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_multisig.rs
@@ -17,27 +17,27 @@
 //! Autogenerated weights for `pallet_multisig`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=coretime-westend-dev
-// --wasm-execution=compiled
 // --pallet=pallet_multisig
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./cumulus/file_header.txt
-// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -55,11 +55,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 11_938_000 picoseconds.
-		Weight::from_parts(13_021_007, 0)
+		// Minimum execution time: 16_090_000 picoseconds.
+		Weight::from_parts(16_926_991, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(482, 0).saturating_mul(z.into()))
+			// Standard Error: 6
+			.saturating_add(Weight::from_parts(500, 0).saturating_mul(z.into()))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
@@ -69,13 +69,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `262 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 37_643_000 picoseconds.
-		Weight::from_parts(27_088_068, 0)
+		// Minimum execution time: 46_739_000 picoseconds.
+		Weight::from_parts(34_253_833, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 828
-			.saturating_add(Weight::from_parts(123_693, 0).saturating_mul(s.into()))
-			// Standard Error: 8
-			.saturating_add(Weight::from_parts(1_456, 0).saturating_mul(z.into()))
+			// Standard Error: 1_258
+			.saturating_add(Weight::from_parts(141_511, 0).saturating_mul(s.into()))
+			// Standard Error: 12
+			.saturating_add(Weight::from_parts(1_969, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -87,13 +87,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `282`
 		//  Estimated: `6811`
-		// Minimum execution time: 25_825_000 picoseconds.
-		Weight::from_parts(15_698_835, 0)
+		// Minimum execution time: 31_190_000 picoseconds.
+		Weight::from_parts(18_287_369, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 568
-			.saturating_add(Weight::from_parts(111_928, 0).saturating_mul(s.into()))
-			// Standard Error: 5
-			.saturating_add(Weight::from_parts(1_421, 0).saturating_mul(z.into()))
+			// Standard Error: 1_405
+			.saturating_add(Weight::from_parts(143_414, 0).saturating_mul(s.into()))
+			// Standard Error: 13
+			.saturating_add(Weight::from_parts(2_047, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -107,58 +107,61 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `385 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 43_587_000 picoseconds.
-		Weight::from_parts(29_740_539, 0)
+		// Minimum execution time: 53_340_000 picoseconds.
+		Weight::from_parts(31_091_227, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 771
-			.saturating_add(Weight::from_parts(154_861, 0).saturating_mul(s.into()))
-			// Standard Error: 7
-			.saturating_add(Weight::from_parts(1_557, 0).saturating_mul(z.into()))
+			// Standard Error: 3_346
+			.saturating_add(Weight::from_parts(256_292, 0).saturating_mul(s.into()))
+			// Standard Error: 32
+			.saturating_add(Weight::from_parts(2_518, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `263 + s * (2 ±0)`
+		//  Measured:  `262 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 24_966_000 picoseconds.
-		Weight::from_parts(25_879_458, 0)
+		// Minimum execution time: 30_024_000 picoseconds.
+		Weight::from_parts(32_926_280, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 777
-			.saturating_add(Weight::from_parts(122_823, 0).saturating_mul(s.into()))
+			// Standard Error: 1_559
+			.saturating_add(Weight::from_parts(151_433, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `282`
 		//  Estimated: `6811`
-		// Minimum execution time: 14_450_000 picoseconds.
-		Weight::from_parts(14_607_858, 0)
+		// Minimum execution time: 16_853_000 picoseconds.
+		Weight::from_parts(17_314_743, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 471
-			.saturating_add(Weight::from_parts(107_007, 0).saturating_mul(s.into()))
+			// Standard Error: 1_022
+			.saturating_add(Weight::from_parts(139_694, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `454 + s * (1 ±0)`
+		//  Measured:  `449 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 26_861_000 picoseconds.
-		Weight::from_parts(27_846_825, 0)
+		// Minimum execution time: 31_102_000 picoseconds.
+		Weight::from_parts(32_212_096, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 714
-			.saturating_add(Weight::from_parts(116_914, 0).saturating_mul(s.into()))
+			// Standard Error: 1_524
+			.saturating_add(Weight::from_parts(151_963, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_multisig.rs
index 73abb62b048..82fcacf64ac 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_multisig.rs
@@ -1,40 +1,43 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-kusama-dev"), DB CACHE: 1024
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./artifacts/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --chain=people-kusama-dev
-// --execution=wasm
-// --wasm-execution=compiled
-// --pallet=pallet_multisig
 // --extrinsic=*
+// --chain=people-rococo-dev
+// --pallet=pallet_multisig
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./cumulus/parachains/runtimes/people/people-kusama/src/weights/pallet_multisig.rs
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -52,110 +55,113 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 11_056_000 picoseconds.
-		Weight::from_parts(11_510_137, 0)
+		// Minimum execution time: 16_209_000 picoseconds.
+		Weight::from_parts(16_941_673, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 1
-			.saturating_add(Weight::from_parts(528, 0).saturating_mul(z.into()))
+			// Standard Error: 10
+			.saturating_add(Weight::from_parts(551, 0).saturating_mul(z.into()))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `263 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 41_105_000 picoseconds.
-		Weight::from_parts(34_947_072, 0)
+		// Minimum execution time: 47_880_000 picoseconds.
+		Weight::from_parts(35_747_073, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 499
-			.saturating_add(Weight::from_parts(67_375, 0).saturating_mul(s.into()))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(1_227, 0).saturating_mul(z.into()))
+			// Standard Error: 2_069
+			.saturating_add(Weight::from_parts(147_421, 0).saturating_mul(s.into()))
+			// Standard Error: 20
+			.saturating_add(Weight::from_parts(1_853, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[3, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `282`
 		//  Estimated: `6811`
-		// Minimum execution time: 26_640_000 picoseconds.
-		Weight::from_parts(21_515_344, 0)
+		// Minimum execution time: 31_245_000 picoseconds.
+		Weight::from_parts(19_011_583, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 943
-			.saturating_add(Weight::from_parts(58_769, 0).saturating_mul(s.into()))
-			// Standard Error: 9
-			.saturating_add(Weight::from_parts(1_233, 0).saturating_mul(z.into()))
+			// Standard Error: 1_336
+			.saturating_add(Weight::from_parts(136_422, 0).saturating_mul(s.into()))
+			// Standard Error: 13
+			.saturating_add(Weight::from_parts(2_013, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
-	/// Storage: System Account (r:1 w:1)
-	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `388 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 45_875_000 picoseconds.
-		Weight::from_parts(38_052_994, 0)
+		// Minimum execution time: 52_116_000 picoseconds.
+		Weight::from_parts(33_912_565, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 507
-			.saturating_add(Weight::from_parts(82_957, 0).saturating_mul(s.into()))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(1_277, 0).saturating_mul(z.into()))
+			// Standard Error: 3_064
+			.saturating_add(Weight::from_parts(258_562, 0).saturating_mul(s.into()))
+			// Standard Error: 30
+			.saturating_add(Weight::from_parts(2_206, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `263 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 32_359_000 picoseconds.
-		Weight::from_parts(33_845_761, 0)
+		// Minimum execution time: 31_142_000 picoseconds.
+		Weight::from_parts(32_417_223, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 623
-			.saturating_add(Weight::from_parts(69_809, 0).saturating_mul(s.into()))
+			// Standard Error: 1_622
+			.saturating_add(Weight::from_parts(163_533, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `282`
 		//  Estimated: `6811`
-		// Minimum execution time: 18_791_000 picoseconds.
-		Weight::from_parts(20_017_375, 0)
+		// Minimum execution time: 17_183_000 picoseconds.
+		Weight::from_parts(18_181_089, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 466
-			.saturating_add(Weight::from_parts(64_780, 0).saturating_mul(s.into()))
+			// Standard Error: 1_123
+			.saturating_add(Weight::from_parts(134_567, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `454 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 33_132_000 picoseconds.
-		Weight::from_parts(34_485_734, 0)
+		// Minimum execution time: 32_006_000 picoseconds.
+		Weight::from_parts(33_910_335, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 601
-			.saturating_add(Weight::from_parts(70_191, 0).saturating_mul(s.into()))
+			// Standard Error: 1_347
+			.saturating_add(Weight::from_parts(138_258, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_multisig.rs
index 70809dea236..5857a140e05 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_multisig.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_multisig.rs
@@ -1,40 +1,43 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-polkadot-dev"), DB CACHE: 1024
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./artifacts/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --chain=people-polkadot-dev
-// --execution=wasm
-// --wasm-execution=compiled
-// --pallet=pallet_multisig
 // --extrinsic=*
+// --chain=people-westend-dev
+// --pallet=pallet_multisig
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./cumulus/parachains/runtimes/people/people-polkadot/src/weights/pallet_multisig.rs
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -52,110 +55,113 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 11_337_000 picoseconds.
-		Weight::from_parts(11_960_522, 0)
+		// Minimum execution time: 15_664_000 picoseconds.
+		Weight::from_parts(16_483_544, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 9
-			.saturating_add(Weight::from_parts(504, 0).saturating_mul(z.into()))
+			// Standard Error: 6
+			.saturating_add(Weight::from_parts(527, 0).saturating_mul(z.into()))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `263 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 41_128_000 picoseconds.
-		Weight::from_parts(35_215_592, 0)
+		// Minimum execution time: 47_543_000 picoseconds.
+		Weight::from_parts(32_140_648, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 429
-			.saturating_add(Weight::from_parts(65_959, 0).saturating_mul(s.into()))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(1_230, 0).saturating_mul(z.into()))
+			// Standard Error: 2_184
+			.saturating_add(Weight::from_parts(163_779, 0).saturating_mul(s.into()))
+			// Standard Error: 21
+			.saturating_add(Weight::from_parts(2_192, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[3, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `282`
 		//  Estimated: `6811`
-		// Minimum execution time: 26_878_000 picoseconds.
-		Weight::from_parts(21_448_577, 0)
+		// Minimum execution time: 31_080_000 picoseconds.
+		Weight::from_parts(19_282_980, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 354
-			.saturating_add(Weight::from_parts(60_286, 0).saturating_mul(s.into()))
-			// Standard Error: 3
-			.saturating_add(Weight::from_parts(1_236, 0).saturating_mul(z.into()))
+			// Standard Error: 1_261
+			.saturating_add(Weight::from_parts(134_865, 0).saturating_mul(s.into()))
+			// Standard Error: 12
+			.saturating_add(Weight::from_parts(2_015, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
-	/// Storage: System Account (r:1 w:1)
-	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `388 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 45_716_000 picoseconds.
-		Weight::from_parts(38_332_947, 0)
+		// Minimum execution time: 54_063_000 picoseconds.
+		Weight::from_parts(34_760_071, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 554
-			.saturating_add(Weight::from_parts(81_026, 0).saturating_mul(s.into()))
-			// Standard Error: 5
-			.saturating_add(Weight::from_parts(1_265, 0).saturating_mul(z.into()))
+			// Standard Error: 2_858
+			.saturating_add(Weight::from_parts(242_502, 0).saturating_mul(s.into()))
+			// Standard Error: 28
+			.saturating_add(Weight::from_parts(2_187, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `263 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 32_089_000 picoseconds.
-		Weight::from_parts(33_664_508, 0)
+		// Minimum execution time: 30_997_000 picoseconds.
+		Weight::from_parts(32_861_544, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 487
-			.saturating_add(Weight::from_parts(67_443, 0).saturating_mul(s.into()))
+			// Standard Error: 1_172
+			.saturating_add(Weight::from_parts(144_646, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `282`
 		//  Estimated: `6811`
-		// Minimum execution time: 18_631_000 picoseconds.
-		Weight::from_parts(19_909_964, 0)
+		// Minimum execution time: 17_110_000 picoseconds.
+		Weight::from_parts(16_883_743, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 434
-			.saturating_add(Weight::from_parts(62_989, 0).saturating_mul(s.into()))
+			// Standard Error: 1_170
+			.saturating_add(Weight::from_parts(141_623, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `454 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 32_486_000 picoseconds.
-		Weight::from_parts(34_303_784, 0)
+		// Minimum execution time: 31_575_000 picoseconds.
+		Weight::from_parts(33_599_222, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 585
-			.saturating_add(Weight::from_parts(69_979, 0).saturating_mul(s.into()))
+			// Standard Error: 1_343
+			.saturating_add(Weight::from_parts(148_578, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs
index f1b81759ece..d63c82daacd 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_multisig.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_multisig.rs
@@ -17,27 +17,27 @@
 //! Autogenerated weights for `pallet_multisig`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=rococo-dev
+// --pallet=pallet_multisig
+// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
+// --heap-pages=4096
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --pallet=pallet_multisig
-// --extrinsic=*
-// --execution=wasm
-// --wasm-execution=compiled
-// --header=./polkadot/file_header.txt
-// --output=./polkadot/runtime/rococo/src/weights/
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -55,11 +55,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 12_023_000 picoseconds.
-		Weight::from_parts(12_643_116, 0)
+		// Minimum execution time: 15_707_000 picoseconds.
+		Weight::from_parts(17_199_004, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 3
-			.saturating_add(Weight::from_parts(582, 0).saturating_mul(z.into()))
+			// Standard Error: 15
+			.saturating_add(Weight::from_parts(639, 0).saturating_mul(z.into()))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
@@ -69,13 +69,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `229 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 39_339_000 picoseconds.
-		Weight::from_parts(27_243_033, 0)
+		// Minimum execution time: 47_949_000 picoseconds.
+		Weight::from_parts(33_500_294, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_319
-			.saturating_add(Weight::from_parts(142_212, 0).saturating_mul(s.into()))
-			// Standard Error: 12
-			.saturating_add(Weight::from_parts(1_592, 0).saturating_mul(z.into()))
+			// Standard Error: 1_775
+			.saturating_add(Weight::from_parts(159_011, 0).saturating_mul(s.into()))
+			// Standard Error: 17
+			.saturating_add(Weight::from_parts(2_213, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -87,13 +87,13 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `248`
 		//  Estimated: `6811`
-		// Minimum execution time: 27_647_000 picoseconds.
-		Weight::from_parts(15_828_725, 0)
+		// Minimum execution time: 31_197_000 picoseconds.
+		Weight::from_parts(19_488_352, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 908
-			.saturating_add(Weight::from_parts(130_880, 0).saturating_mul(s.into()))
-			// Standard Error: 8
-			.saturating_add(Weight::from_parts(1_532, 0).saturating_mul(z.into()))
+			// Standard Error: 1_332
+			.saturating_add(Weight::from_parts(138_347, 0).saturating_mul(s.into()))
+			// Standard Error: 13
+			.saturating_add(Weight::from_parts(2_122, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -107,28 +107,29 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `354 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 46_971_000 picoseconds.
-		Weight::from_parts(32_150_393, 0)
+		// Minimum execution time: 54_297_000 picoseconds.
+		Weight::from_parts(33_256_178, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_129
-			.saturating_add(Weight::from_parts(154_796, 0).saturating_mul(s.into()))
-			// Standard Error: 11
-			.saturating_add(Weight::from_parts(1_603, 0).saturating_mul(z.into()))
+			// Standard Error: 3_088
+			.saturating_add(Weight::from_parts(256_364, 0).saturating_mul(s.into()))
+			// Standard Error: 30
+			.saturating_add(Weight::from_parts(2_488, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
 	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `229 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 24_947_000 picoseconds.
-		Weight::from_parts(26_497_183, 0)
+		// Minimum execution time: 31_246_000 picoseconds.
+		Weight::from_parts(32_245_711, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_615
-			.saturating_add(Weight::from_parts(147_071, 0).saturating_mul(s.into()))
+			// Standard Error: 1_704
+			.saturating_add(Weight::from_parts(156_235, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -139,11 +140,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `248`
 		//  Estimated: `6811`
-		// Minimum execution time: 13_897_000 picoseconds.
-		Weight::from_parts(14_828_339, 0)
+		// Minimum execution time: 17_353_000 picoseconds.
+		Weight::from_parts(17_418_506, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_136
-			.saturating_add(Weight::from_parts(133_925, 0).saturating_mul(s.into()))
+			// Standard Error: 1_126
+			.saturating_add(Weight::from_parts(136_788, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -154,11 +155,11 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `420 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 28_984_000 picoseconds.
-		Weight::from_parts(29_853_232, 0)
+		// Minimum execution time: 32_603_000 picoseconds.
+		Weight::from_parts(33_456_399, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 650
-			.saturating_add(Weight::from_parts(113_440, 0).saturating_mul(s.into()))
+			// Standard Error: 1_239
+			.saturating_add(Weight::from_parts(146_249, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/polkadot/runtime/westend/src/weights/pallet_multisig.rs b/polkadot/runtime/westend/src/weights/pallet_multisig.rs
index 616aea9c8e7..83521f3d192 100644
--- a/polkadot/runtime/westend/src/weights/pallet_multisig.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_multisig.rs
@@ -16,28 +16,28 @@
 
 //! Autogenerated weights for `pallet_multisig`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=westend-dev
+// --pallet=pallet_multisig
+// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
+// --heap-pages=4096
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --pallet=pallet_multisig
-// --extrinsic=*
-// --execution=wasm
-// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -55,110 +55,111 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 13_218_000 picoseconds.
-		Weight::from_parts(14_749_472, 0)
+		// Minimum execution time: 15_705_000 picoseconds.
+		Weight::from_parts(16_890_096, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			// Standard Error: 10
-			.saturating_add(Weight::from_parts(507, 0).saturating_mul(z.into()))
+			// Standard Error: 13
+			.saturating_add(Weight::from_parts(549, 0).saturating_mul(z.into()))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `309 + s * (2 ±0)`
+		//  Measured:  `267 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 45_891_000 picoseconds.
-		Weight::from_parts(33_546_627, 0)
+		// Minimum execution time: 54_293_000 picoseconds.
+		Weight::from_parts(39_710_880, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 2_347
-			.saturating_add(Weight::from_parts(136_466, 0).saturating_mul(s.into()))
-			// Standard Error: 23
-			.saturating_add(Weight::from_parts(1_595, 0).saturating_mul(z.into()))
+			// Standard Error: 1_591
+			.saturating_add(Weight::from_parts(164_846, 0).saturating_mul(s.into()))
+			// Standard Error: 15
+			.saturating_add(Weight::from_parts(1_993, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[3, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `286`
 		//  Estimated: `6811`
-		// Minimum execution time: 30_355_000 picoseconds.
-		Weight::from_parts(19_611_682, 0)
+		// Minimum execution time: 36_477_000 picoseconds.
+		Weight::from_parts(22_595_904, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_383
-			.saturating_add(Weight::from_parts(123_652, 0).saturating_mul(s.into()))
-			// Standard Error: 13
-			.saturating_add(Weight::from_parts(1_488, 0).saturating_mul(z.into()))
+			// Standard Error: 1_526
+			.saturating_add(Weight::from_parts(159_314, 0).saturating_mul(s.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(2_219, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
-	/// Storage: System Account (r:1 w:1)
-	/// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `392 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 50_453_000 picoseconds.
-		Weight::from_parts(35_628_285, 0)
+		// Minimum execution time: 60_127_000 picoseconds.
+		Weight::from_parts(33_469_803, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 3_693
-			.saturating_add(Weight::from_parts(203_453, 0).saturating_mul(s.into()))
-			// Standard Error: 36
-			.saturating_add(Weight::from_parts(1_726, 0).saturating_mul(z.into()))
+			// Standard Error: 3_400
+			.saturating_add(Weight::from_parts(309_634, 0).saturating_mul(s.into()))
+			// Standard Error: 33
+			.saturating_add(Weight::from_parts(2_795, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
+	/// The range of component `z` is `[0, 10000]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `314 + s * (2 ±0)`
+		//  Measured:  `267 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 32_500_000 picoseconds.
-		Weight::from_parts(33_231_806, 0)
+		// Minimum execution time: 36_697_000 picoseconds.
+		Weight::from_parts(38_746_125, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_511
-			.saturating_add(Weight::from_parts(134_500, 0).saturating_mul(s.into()))
+			// Standard Error: 2_073
+			.saturating_add(Weight::from_parts(159_426, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `286`
 		//  Estimated: `6811`
-		// Minimum execution time: 17_906_000 picoseconds.
-		Weight::from_parts(18_757_928, 0)
+		// Minimum execution time: 21_909_000 picoseconds.
+		Weight::from_parts(22_227_385, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_172
-			.saturating_add(Weight::from_parts(113_535, 0).saturating_mul(s.into()))
+			// Standard Error: 1_063
+			.saturating_add(Weight::from_parts(146_021, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: Multisig Multisigs (r:1 w:1)
-	/// Proof: Multisig Multisigs (max_values: None, max_size: Some(3346), added: 5821, mode: MaxEncodedLen)
+	/// Storage: `Multisig::Multisigs` (r:1 w:1)
+	/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[2, 100]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `458 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 33_018_000 picoseconds.
-		Weight::from_parts(34_186_533, 0)
+		// Minimum execution time: 36_637_000 picoseconds.
+		Weight::from_parts(36_457_379, 0)
 			.saturating_add(Weight::from_parts(0, 6811))
-			// Standard Error: 1_188
-			.saturating_add(Weight::from_parts(128_449, 0).saturating_mul(s.into()))
+			// Standard Error: 1_709
+			.saturating_add(Weight::from_parts(171_090, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs
index ec9eee205ce..b174823b384 100644
--- a/substrate/.maintain/frame-weight-template.hbs
+++ b/substrate/.maintain/frame-weight-template.hbs
@@ -17,8 +17,7 @@
 #![allow(unused_imports)]
 #![allow(missing_docs)]
 
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
-use core::marker::PhantomData;
+use frame::weights_prelude::*;
 
 /// Weight functions needed for `{{pallet}}`.
 pub trait WeightInfo {
diff --git a/substrate/frame/multisig/src/benchmarking.rs b/substrate/frame/multisig/src/benchmarking.rs
index ccaa1ceab66..3f75d92fe0e 100644
--- a/substrate/frame/multisig/src/benchmarking.rs
+++ b/substrate/frame/multisig/src/benchmarking.rs
@@ -194,14 +194,14 @@ mod benchmarks {
 		Ok(())
 	}
 
-	/// `z`: Transaction Length, not a component
 	/// `s`: Signatories, need at least 2 people
 	#[benchmark]
 	fn approve_as_multi_create(
 		s: Linear<2, { T::MaxSignatories::get() }>,
-		z: Linear<0, 10_000>,
 	) -> Result<(), BenchmarkError> {
-		let (mut signatories, call) = setup_multi::<T>(s, z)?;
+		// The call is neither in storage or an argument, so just use any:
+		let call_len = 10_000;
+		let (mut signatories, call) = setup_multi::<T>(s, call_len)?;
 		let multi_account_id = Multisig::<T>::multi_account_id(&signatories, s.try_into().unwrap());
 		let caller = signatories.pop().ok_or("signatories should have len 2 or more")?;
 		let call_hash = call.using_encoded(blake2_256);
@@ -225,14 +225,14 @@ mod benchmarks {
 		Ok(())
 	}
 
-	/// `z`: Transaction Length, not a component
 	/// `s`: Signatories, need at least 2 people
 	#[benchmark]
 	fn approve_as_multi_approve(
 		s: Linear<2, { T::MaxSignatories::get() }>,
-		z: Linear<0, 10_000>,
 	) -> Result<(), BenchmarkError> {
-		let (mut signatories, call) = setup_multi::<T>(s, z)?;
+		// The call is neither in storage or an argument, so just use any:
+		let call_len = 10_000;
+		let (mut signatories, call) = setup_multi::<T>(s, call_len)?;
 		let mut signatories2 = signatories.clone();
 		let multi_account_id = Multisig::<T>::multi_account_id(&signatories, s.try_into().unwrap());
 		let caller = signatories.pop().ok_or("signatories should have len 2 or more")?;
@@ -270,14 +270,12 @@ mod benchmarks {
 		Ok(())
 	}
 
-	/// `z`: Transaction Length, not a component
 	/// `s`: Signatories, need at least 2 people
 	#[benchmark]
-	fn cancel_as_multi(
-		s: Linear<2, { T::MaxSignatories::get() }>,
-		z: Linear<0, 10_000>,
-	) -> Result<(), BenchmarkError> {
-		let (mut signatories, call) = setup_multi::<T>(s, z)?;
+	fn cancel_as_multi(s: Linear<2, { T::MaxSignatories::get() }>) -> Result<(), BenchmarkError> {
+		// The call is neither in storage or an argument, so just use any:
+		let call_len = 10_000;
+		let (mut signatories, call) = setup_multi::<T>(s, call_len)?;
 		let multi_account_id = Multisig::<T>::multi_account_id(&signatories, s.try_into().unwrap());
 		let caller = signatories.pop().ok_or("signatories should have len 2 or more")?;
 		let call_hash = call.using_encoded(blake2_256);
diff --git a/substrate/frame/multisig/src/weights.rs b/substrate/frame/multisig/src/weights.rs
index 5c14922e0ef..1c91734e618 100644
--- a/substrate/frame/multisig/src/weights.rs
+++ b/substrate/frame/multisig/src/weights.rs
@@ -18,36 +18,36 @@
 //! Autogenerated weights for `pallet_multisig`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-04-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `25968fd2c26d`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024`
 
 // Executed Command:
-// ./target/production/substrate-node
+// target/production/substrate-node
 // benchmark
 // pallet
+// --extrinsic=*
 // --chain=dev
+// --pallet=pallet_multisig
+// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2
+// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/multisig/src/weights.rs
+// --wasm-execution=compiled
 // --steps=50
 // --repeat=20
-// --pallet=pallet_multisig
+// --heap-pages=4096
+// --template=substrate/.maintain/frame-weight-template.hbs
 // --no-storage-info
-// --no-median-slopes
 // --no-min-squares
-// --extrinsic=*
-// --wasm-execution=compiled
-// --heap-pages=4096
-// --output=./substrate/frame/multisig/src/weights.rs
-// --header=./substrate/HEADER-APACHE2
-// --template=./substrate/.maintain/frame-weight-template.hbs
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 #![allow(missing_docs)]
 
-// TODO update this in frame-weight-template.hbs
 use frame::weights_prelude::*;
+
 /// Weight functions needed for `pallet_multisig`.
 pub trait WeightInfo {
 	fn as_multi_threshold_1(z: u32, ) -> Weight;
@@ -71,10 +71,10 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `145`
 		//  Estimated: `3997`
-		// Minimum execution time: 20_302_000 picoseconds.
-		Weight::from_parts(21_362_808, 3997)
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(432, 0).saturating_mul(z.into()))
+		// Minimum execution time: 28_800_000 picoseconds.
+		Weight::from_parts(30_130_161, 3997)
+			// Standard Error: 18
+			.saturating_add(Weight::from_parts(551, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
@@ -83,14 +83,14 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `301 + s * (2 ±0)`
+		//  Measured:  `334 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 41_140_000 picoseconds.
-		Weight::from_parts(31_518_927, 6811)
-			// Standard Error: 754
-			.saturating_add(Weight::from_parts(115_804, 0).saturating_mul(s.into()))
-			// Standard Error: 7
-			.saturating_add(Weight::from_parts(1_442, 0).saturating_mul(z.into()))
+		// Minimum execution time: 51_467_000 picoseconds.
+		Weight::from_parts(38_610_296, 6811)
+			// Standard Error: 1_796
+			.saturating_add(Weight::from_parts(161_251, 0).saturating_mul(s.into()))
+			// Standard Error: 17
+			.saturating_add(Weight::from_parts(2_068, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -100,14 +100,14 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `320`
+		//  Measured:  `353`
 		//  Estimated: `6811`
-		// Minimum execution time: 27_375_000 picoseconds.
-		Weight::from_parts(17_806_361, 6811)
-			// Standard Error: 501
-			.saturating_add(Weight::from_parts(107_042, 0).saturating_mul(s.into()))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(1_491, 0).saturating_mul(z.into()))
+		// Minimum execution time: 36_208_000 picoseconds.
+		Weight::from_parts(24_694_507, 6811)
+			// Standard Error: 1_430
+			.saturating_add(Weight::from_parts(134_263, 0).saturating_mul(s.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(2_021, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -123,14 +123,14 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `571 + s * (33 ±0)`
+		//  Measured:  `604 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 54_427_000 picoseconds.
-		Weight::from_parts(43_677_970, 6811)
-			// Standard Error: 1_342
-			.saturating_add(Weight::from_parts(154_697, 0).saturating_mul(s.into()))
-			// Standard Error: 13
-			.saturating_add(Weight::from_parts(1_534, 0).saturating_mul(z.into()))
+		// Minimum execution time: 65_217_000 picoseconds.
+		Weight::from_parts(48_235_573, 6811)
+			// Standard Error: 2_841
+			.saturating_add(Weight::from_parts(205_077, 0).saturating_mul(s.into()))
+			// Standard Error: 27
+			.saturating_add(Weight::from_parts(2_298, 0).saturating_mul(z.into()))
 			.saturating_add(T::DbWeight::get().reads(4_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
@@ -139,12 +139,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `s` is `[2, 100]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `301 + s * (2 ±0)`
+		//  Measured:  `334 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 29_102_000 picoseconds.
-		Weight::from_parts(30_317_105, 6811)
-			// Standard Error: 903
-			.saturating_add(Weight::from_parts(109_792, 0).saturating_mul(s.into()))
+		// Minimum execution time: 35_727_000 picoseconds.
+		Weight::from_parts(37_329_524, 6811)
+			// Standard Error: 1_814
+			.saturating_add(Weight::from_parts(157_471, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -153,12 +153,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `s` is `[2, 100]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `320`
+		//  Measured:  `353`
 		//  Estimated: `6811`
-		// Minimum execution time: 16_300_000 picoseconds.
-		Weight::from_parts(17_358_877, 6811)
-			// Standard Error: 522
-			.saturating_add(Weight::from_parts(99_194, 0).saturating_mul(s.into()))
+		// Minimum execution time: 21_623_000 picoseconds.
+		Weight::from_parts(22_601_251, 6811)
+			// Standard Error: 963
+			.saturating_add(Weight::from_parts(139_320, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -167,12 +167,12 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	/// The range of component `s` is `[2, 100]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `492 + s * (1 ±0)`
+		//  Measured:  `525 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 30_147_000 picoseconds.
-		Weight::from_parts(32_003_421, 6811)
-			// Standard Error: 1_077
-			.saturating_add(Weight::from_parts(108_567, 0).saturating_mul(s.into()))
+		// Minimum execution time: 36_801_000 picoseconds.
+		Weight::from_parts(37_578_412, 6811)
+			// Standard Error: 1_580
+			.saturating_add(Weight::from_parts(159_580, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(1_u64))
 			.saturating_add(T::DbWeight::get().writes(1_u64))
 	}
@@ -189,10 +189,10 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		//  Measured:  `145`
 		//  Estimated: `3997`
-		// Minimum execution time: 20_302_000 picoseconds.
-		Weight::from_parts(21_362_808, 3997)
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(432, 0).saturating_mul(z.into()))
+		// Minimum execution time: 28_800_000 picoseconds.
+		Weight::from_parts(30_130_161, 3997)
+			// Standard Error: 18
+			.saturating_add(Weight::from_parts(551, 0).saturating_mul(z.into()))
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 	}
 	/// Storage: `Multisig::Multisigs` (r:1 w:1)
@@ -201,14 +201,14 @@ impl WeightInfo for () {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_create(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `301 + s * (2 ±0)`
+		//  Measured:  `334 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 41_140_000 picoseconds.
-		Weight::from_parts(31_518_927, 6811)
-			// Standard Error: 754
-			.saturating_add(Weight::from_parts(115_804, 0).saturating_mul(s.into()))
-			// Standard Error: 7
-			.saturating_add(Weight::from_parts(1_442, 0).saturating_mul(z.into()))
+		// Minimum execution time: 51_467_000 picoseconds.
+		Weight::from_parts(38_610_296, 6811)
+			// Standard Error: 1_796
+			.saturating_add(Weight::from_parts(161_251, 0).saturating_mul(s.into()))
+			// Standard Error: 17
+			.saturating_add(Weight::from_parts(2_068, 0).saturating_mul(z.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -218,14 +218,14 @@ impl WeightInfo for () {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_approve(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `320`
+		//  Measured:  `353`
 		//  Estimated: `6811`
-		// Minimum execution time: 27_375_000 picoseconds.
-		Weight::from_parts(17_806_361, 6811)
-			// Standard Error: 501
-			.saturating_add(Weight::from_parts(107_042, 0).saturating_mul(s.into()))
-			// Standard Error: 4
-			.saturating_add(Weight::from_parts(1_491, 0).saturating_mul(z.into()))
+		// Minimum execution time: 36_208_000 picoseconds.
+		Weight::from_parts(24_694_507, 6811)
+			// Standard Error: 1_430
+			.saturating_add(Weight::from_parts(134_263, 0).saturating_mul(s.into()))
+			// Standard Error: 14
+			.saturating_add(Weight::from_parts(2_021, 0).saturating_mul(z.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -241,14 +241,14 @@ impl WeightInfo for () {
 	/// The range of component `z` is `[0, 10000]`.
 	fn as_multi_complete(s: u32, z: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `571 + s * (33 ±0)`
+		//  Measured:  `604 + s * (33 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 54_427_000 picoseconds.
-		Weight::from_parts(43_677_970, 6811)
-			// Standard Error: 1_342
-			.saturating_add(Weight::from_parts(154_697, 0).saturating_mul(s.into()))
-			// Standard Error: 13
-			.saturating_add(Weight::from_parts(1_534, 0).saturating_mul(z.into()))
+		// Minimum execution time: 65_217_000 picoseconds.
+		Weight::from_parts(48_235_573, 6811)
+			// Standard Error: 2_841
+			.saturating_add(Weight::from_parts(205_077, 0).saturating_mul(s.into()))
+			// Standard Error: 27
+			.saturating_add(Weight::from_parts(2_298, 0).saturating_mul(z.into()))
 			.saturating_add(RocksDbWeight::get().reads(4_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
@@ -257,12 +257,12 @@ impl WeightInfo for () {
 	/// The range of component `s` is `[2, 100]`.
 	fn approve_as_multi_create(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `301 + s * (2 ±0)`
+		//  Measured:  `334 + s * (2 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 29_102_000 picoseconds.
-		Weight::from_parts(30_317_105, 6811)
-			// Standard Error: 903
-			.saturating_add(Weight::from_parts(109_792, 0).saturating_mul(s.into()))
+		// Minimum execution time: 35_727_000 picoseconds.
+		Weight::from_parts(37_329_524, 6811)
+			// Standard Error: 1_814
+			.saturating_add(Weight::from_parts(157_471, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -271,12 +271,12 @@ impl WeightInfo for () {
 	/// The range of component `s` is `[2, 100]`.
 	fn approve_as_multi_approve(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `320`
+		//  Measured:  `353`
 		//  Estimated: `6811`
-		// Minimum execution time: 16_300_000 picoseconds.
-		Weight::from_parts(17_358_877, 6811)
-			// Standard Error: 522
-			.saturating_add(Weight::from_parts(99_194, 0).saturating_mul(s.into()))
+		// Minimum execution time: 21_623_000 picoseconds.
+		Weight::from_parts(22_601_251, 6811)
+			// Standard Error: 963
+			.saturating_add(Weight::from_parts(139_320, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -285,13 +285,13 @@ impl WeightInfo for () {
 	/// The range of component `s` is `[2, 100]`.
 	fn cancel_as_multi(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `492 + s * (1 ±0)`
+		//  Measured:  `525 + s * (1 ±0)`
 		//  Estimated: `6811`
-		// Minimum execution time: 30_147_000 picoseconds.
-		Weight::from_parts(32_003_421, 6811)
-			// Standard Error: 1_077
-			.saturating_add(Weight::from_parts(108_567, 0).saturating_mul(s.into()))
+		// Minimum execution time: 36_801_000 picoseconds.
+		Weight::from_parts(37_578_412, 6811)
+			// Standard Error: 1_580
+			.saturating_add(Weight::from_parts(159_580, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
-}
\ No newline at end of file
+}
-- 
GitLab


From 23600076de203dad498d815ff4b7ed2968217c10 Mon Sep 17 00:00:00 2001
From: Branislav Kontur <bkontur@gmail.com>
Date: Fri, 24 Jan 2025 13:32:19 +0100
Subject: [PATCH 109/116] Nits for collectives-westend XCM benchmarks setup
 (#7215)

Closes: https://github.com/paritytech/polkadot-sdk/issues/2904

---------

Co-authored-by: command-bot <>
---
 Cargo.lock                                    |   1 +
 .../collectives-westend/Cargo.toml            |   3 +
 .../collectives-westend/src/lib.rs            | 122 +++++++-
 .../src/weights/pallet_xcm.rs                 | 235 +++++++---------
 .../xcm/pallet_xcm_benchmarks_fungible.rs     | 128 ++++-----
 .../xcm/pallet_xcm_benchmarks_generic.rs      | 262 +++++++++---------
 6 files changed, 419 insertions(+), 332 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 5cc898714d3..df2c58b7f4c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3707,6 +3707,7 @@ dependencies = [
  "pallet-treasury 27.0.0",
  "pallet-utility 28.0.0",
  "pallet-xcm 7.0.0",
+ "pallet-xcm-benchmarks 7.0.0",
  "parachains-common 7.0.0",
  "parachains-runtimes-test-utils 7.0.0",
  "parity-scale-codec",
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
index 2786321e48e..f9cc54495ae 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml
@@ -65,6 +65,7 @@ sp-version = { workspace = true }
 
 # Polkadot
 pallet-xcm = { workspace = true }
+pallet-xcm-benchmarks = { optional = true, workspace = true }
 polkadot-parachain-primitives = { workspace = true }
 polkadot-runtime-common = { workspace = true }
 westend-runtime-constants = { workspace = true }
@@ -131,6 +132,7 @@ runtime-benchmarks = [
 	"pallet-transaction-payment/runtime-benchmarks",
 	"pallet-treasury/runtime-benchmarks",
 	"pallet-utility/runtime-benchmarks",
+	"pallet-xcm-benchmarks/runtime-benchmarks",
 	"pallet-xcm/runtime-benchmarks",
 	"parachains-common/runtime-benchmarks",
 	"polkadot-parachain-primitives/runtime-benchmarks",
@@ -222,6 +224,7 @@ std = [
 	"pallet-transaction-payment/std",
 	"pallet-treasury/std",
 	"pallet-utility/std",
+	"pallet-xcm-benchmarks?/std",
 	"pallet-xcm/std",
 	"parachain-info/std",
 	"parachains-common/std",
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
index 5eafc2960cc..5e087832f0e 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs
@@ -801,7 +801,6 @@ mod benches {
 		[cumulus_pallet_xcmp_queue, XcmpQueue]
 		[pallet_alliance, Alliance]
 		[pallet_collective, AllianceMotion]
-		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
 		[pallet_preimage, Preimage]
 		[pallet_scheduler, Scheduler]
 		[pallet_referenda, FellowshipReferenda]
@@ -816,6 +815,11 @@ mod benches {
 		[pallet_treasury, FellowshipTreasury]
 		[pallet_asset_rate, AssetRate]
 		[cumulus_pallet_weight_reclaim, WeightReclaim]
+		// XCM
+		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
+		// NOTE: Make sure you point to the individual modules below.
+		[pallet_xcm_benchmarks::fungible, XcmBalances]
+		[pallet_xcm_benchmarks::generic, XcmGeneric]
 	);
 }
 
@@ -1065,6 +1069,12 @@ impl_runtime_apis! {
 			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
 			use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark;
 
+			// This is defined once again in dispatch_benchmark, because list_benchmarks!
+			// and add_benchmarks! are macros exported by define_benchmarks! macros and those types
+			// are referenced in that call.
+			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
+			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
+
 			let mut list = Vec::<BenchmarkList>::new();
 			list_benchmarks!(list, extra);
 
@@ -1093,10 +1103,11 @@ impl_runtime_apis! {
 
 			use cumulus_pallet_session_benchmarking::Pallet as SessionBench;
 			impl cumulus_pallet_session_benchmarking::Config for Runtime {}
+			use xcm_config::WndLocation;
 
 			parameter_types! {
 				pub ExistentialDepositAsset: Option<Asset> = Some((
-					xcm_config::WndLocation::get(),
+					WndLocation::get(),
 					ExistentialDeposit::get()
 				).into());
 			}
@@ -1149,6 +1160,112 @@ impl_runtime_apis! {
 				}
 			}
 
+			impl pallet_xcm_benchmarks::Config for Runtime {
+				type XcmConfig = xcm_config::XcmConfig;
+				type AccountIdConverter = xcm_config::LocationToAccountId;
+				type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper<
+					xcm_config::XcmConfig,
+					ExistentialDepositAsset,
+					xcm_config::PriceForParentDelivery,
+				>;
+				fn valid_destination() -> Result<Location, BenchmarkError> {
+					Ok(WndLocation::get())
+				}
+				fn worst_case_holding(_depositable_count: u32) -> Assets {
+					// just concrete assets according to relay chain.
+					let assets: Vec<Asset> = vec![
+						Asset {
+							id: AssetId(WndLocation::get()),
+							fun: Fungible(1_000_000 * UNITS),
+						}
+					];
+					assets.into()
+				}
+			}
+
+			parameter_types! {
+				pub const TrustedTeleporter: Option<(Location, Asset)> = Some((
+					WndLocation::get(),
+					Asset { fun: Fungible(UNITS), id: AssetId(WndLocation::get()) },
+				));
+				pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None;
+				pub const TrustedReserve: Option<(Location, Asset)> = None;
+			}
+
+			impl pallet_xcm_benchmarks::fungible::Config for Runtime {
+				type TransactAsset = Balances;
+
+				type CheckedAccount = CheckedAccount;
+				type TrustedTeleporter = TrustedTeleporter;
+				type TrustedReserve = TrustedReserve;
+
+				fn get_asset() -> Asset {
+					Asset {
+						id: AssetId(WndLocation::get()),
+						fun: Fungible(UNITS),
+					}
+				}
+			}
+
+			impl pallet_xcm_benchmarks::generic::Config for Runtime {
+				type TransactAsset = Balances;
+				type RuntimeCall = RuntimeCall;
+
+				fn worst_case_response() -> (u64, Response) {
+					(0u64, Response::Version(Default::default()))
+				}
+
+				fn worst_case_asset_exchange() -> Result<(Assets, Assets), BenchmarkError> {
+					Err(BenchmarkError::Skip)
+				}
+
+				fn universal_alias() -> Result<(Location, Junction), BenchmarkError> {
+					Err(BenchmarkError::Skip)
+				}
+
+				fn transact_origin_and_runtime_call() -> Result<(Location, RuntimeCall), BenchmarkError> {
+					Ok((WndLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into()))
+				}
+
+				fn subscribe_origin() -> Result<Location, BenchmarkError> {
+					Ok(WndLocation::get())
+				}
+
+				fn claimable_asset() -> Result<(Location, Location, Assets), BenchmarkError> {
+					let origin = WndLocation::get();
+					let assets: Assets = (AssetId(WndLocation::get()), 1_000 * UNITS).into();
+					let ticket = Location { parents: 0, interior: Here };
+					Ok((origin, ticket, assets))
+				}
+
+				fn fee_asset() -> Result<Asset, BenchmarkError> {
+					Ok(Asset {
+						id: AssetId(WndLocation::get()),
+						fun: Fungible(1_000_000 * UNITS),
+					})
+				}
+
+				fn unlockable_asset() -> Result<(Location, Location, Asset), BenchmarkError> {
+					Err(BenchmarkError::Skip)
+				}
+
+				fn export_message_origin_and_destination(
+				) -> Result<(Location, NetworkId, InteriorLocation), BenchmarkError> {
+					Err(BenchmarkError::Skip)
+				}
+
+				fn alias_origin() -> Result<(Location, Location), BenchmarkError> {
+					// Any location can alias to an internal location.
+					// Here parachain 1000 aliases to an internal account.
+					let origin = Location::new(1, [Parachain(1000)]);
+					let target = Location::new(1, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]);
+					Ok((origin, target))
+				}
+			}
+
+			type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::<Runtime>;
+			type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::<Runtime>;
+
 			use frame_support::traits::WhitelistedStorageKeys;
 			let whitelist: Vec<TrackedStorageKey> = AllPalletsWithSystem::whitelisted_storage_keys();
 
@@ -1156,7 +1273,6 @@ impl_runtime_apis! {
 			let params = (&config, &whitelist);
 			add_benchmarks!(params, batches);
 
-			if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
 			Ok(batches)
 		}
 	}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
index ccf88873c2c..c0389cbcdb4 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-01-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `47a5bbdc8de3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `17a605d70d1a`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -56,23 +56,19 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	/// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	/// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn send() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `214`
-		//  Estimated: `3679`
-		// Minimum execution time: 32_779_000 picoseconds.
-		Weight::from_parts(33_417_000, 0)
-			.saturating_add(Weight::from_parts(0, 3679))
-			.saturating_add(T::DbWeight::get().reads(7))
-			.saturating_add(T::DbWeight::get().writes(2))
+		//  Measured:  `111`
+		//  Estimated: `3576`
+		// Minimum execution time: 26_877_000 picoseconds.
+		Weight::from_parts(27_778_000, 0)
+			.saturating_add(Weight::from_parts(0, 3576))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	/// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
@@ -82,10 +78,6 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	/// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `System::Account` (r:1 w:1)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
@@ -94,13 +86,13 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn teleport_assets() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `214`
-		//  Estimated: `3679`
-		// Minimum execution time: 116_031_000 picoseconds.
-		Weight::from_parts(118_863_000, 0)
-			.saturating_add(Weight::from_parts(0, 3679))
-			.saturating_add(T::DbWeight::get().reads(9))
-			.saturating_add(T::DbWeight::get().writes(3))
+		//  Measured:  `111`
+		//  Estimated: `3593`
+		// Minimum execution time: 109_606_000 picoseconds.
+		Weight::from_parts(120_756_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `Benchmark::Override` (r:0 w:0)
 	/// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -120,10 +112,6 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	/// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `System::Account` (r:1 w:1)
 	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	/// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
@@ -132,23 +120,23 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn transfer_assets() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `214`
-		//  Estimated: `3679`
-		// Minimum execution time: 116_267_000 picoseconds.
-		Weight::from_parts(119_519_000, 0)
-			.saturating_add(Weight::from_parts(0, 3679))
-			.saturating_add(T::DbWeight::get().reads(9))
-			.saturating_add(T::DbWeight::get().writes(3))
+		//  Measured:  `111`
+		//  Estimated: `3593`
+		// Minimum execution time: 109_165_000 picoseconds.
+		Weight::from_parts(110_899_000, 0)
+			.saturating_add(Weight::from_parts(0, 3593))
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0)
 	/// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn execute() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `103`
-		//  Estimated: `1588`
-		// Minimum execution time: 12_718_000 picoseconds.
-		Weight::from_parts(13_572_000, 0)
-			.saturating_add(Weight::from_parts(0, 1588))
+		//  Measured:  `0`
+		//  Estimated: `1485`
+		// Minimum execution time: 9_494_000 picoseconds.
+		Weight::from_parts(9_917_000, 0)
+			.saturating_add(Weight::from_parts(0, 1485))
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1)
@@ -157,21 +145,18 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 7_568_000 picoseconds.
-		Weight::from_parts(7_913_000, 0)
+		// Minimum execution time: 7_515_000 picoseconds.
+		Weight::from_parts(7_771_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1)
-	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn force_default_xcm_version() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_225_000 picoseconds.
-		Weight::from_parts(2_473_000, 0)
+		// Minimum execution time: 2_430_000 picoseconds.
+		Weight::from_parts(2_536_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
-			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	/// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1)
 	/// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -181,10 +166,6 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	/// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	/// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
@@ -193,13 +174,13 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn force_subscribe_version_notify() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `145`
-		//  Estimated: `3610`
-		// Minimum execution time: 35_869_000 picoseconds.
-		Weight::from_parts(37_848_000, 0)
-			.saturating_add(Weight::from_parts(0, 3610))
-			.saturating_add(T::DbWeight::get().reads(8))
-			.saturating_add(T::DbWeight::get().writes(5))
+		//  Measured:  `42`
+		//  Estimated: `3507`
+		// Minimum execution time: 28_913_000 picoseconds.
+		Weight::from_parts(29_949_000, 0)
+			.saturating_add(Weight::from_parts(0, 3507))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(4))
 	}
 	/// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1)
 	/// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -207,10 +188,6 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	/// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	/// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
@@ -219,13 +196,13 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn force_unsubscribe_version_notify() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `363`
-		//  Estimated: `3828`
-		// Minimum execution time: 38_649_000 picoseconds.
-		Weight::from_parts(39_842_000, 0)
-			.saturating_add(Weight::from_parts(0, 3828))
-			.saturating_add(T::DbWeight::get().reads(7))
-			.saturating_add(T::DbWeight::get().writes(4))
+		//  Measured:  `136`
+		//  Estimated: `3601`
+		// Minimum execution time: 30_496_000 picoseconds.
+		Weight::from_parts(31_828_000, 0)
+			.saturating_add(Weight::from_parts(0, 3601))
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	/// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1)
 	/// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -233,8 +210,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_223_000 picoseconds.
-		Weight::from_parts(2_483_000, 0)
+		// Minimum execution time: 2_435_000 picoseconds.
+		Weight::from_parts(2_635_000, 0)
 			.saturating_add(Weight::from_parts(0, 0))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -242,11 +219,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn migrate_supported_version() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `159`
-		//  Estimated: `15999`
-		// Minimum execution time: 24_164_000 picoseconds.
-		Weight::from_parts(24_972_000, 0)
-			.saturating_add(Weight::from_parts(0, 15999))
+		//  Measured:  `22`
+		//  Estimated: `15862`
+		// Minimum execution time: 21_713_000 picoseconds.
+		Weight::from_parts(22_209_000, 0)
+			.saturating_add(Weight::from_parts(0, 15862))
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -254,11 +231,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn migrate_version_notifiers() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `163`
-		//  Estimated: `16003`
-		// Minimum execution time: 24_604_000 picoseconds.
-		Weight::from_parts(25_047_000, 0)
-			.saturating_add(Weight::from_parts(0, 16003))
+		//  Measured:  `26`
+		//  Estimated: `15866`
+		// Minimum execution time: 22_035_000 picoseconds.
+		Weight::from_parts(22_675_000, 0)
+			.saturating_add(Weight::from_parts(0, 15866))
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -266,11 +243,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn already_notified_target() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `173`
-		//  Estimated: `18488`
-		// Minimum execution time: 28_088_000 picoseconds.
-		Weight::from_parts(28_431_000, 0)
-			.saturating_add(Weight::from_parts(0, 18488))
+		//  Measured:  `36`
+		//  Estimated: `18351`
+		// Minimum execution time: 24_882_000 picoseconds.
+		Weight::from_parts(25_172_000, 0)
+			.saturating_add(Weight::from_parts(0, 18351))
 			.saturating_add(T::DbWeight::get().reads(7))
 	}
 	/// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1)
@@ -279,44 +256,40 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	/// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	/// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn notify_current_targets() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `212`
-		//  Estimated: `6152`
-		// Minimum execution time: 33_814_000 picoseconds.
-		Weight::from_parts(34_741_000, 0)
-			.saturating_add(Weight::from_parts(0, 6152))
-			.saturating_add(T::DbWeight::get().reads(8))
-			.saturating_add(T::DbWeight::get().writes(3))
+		//  Measured:  `75`
+		//  Estimated: `6015`
+		// Minimum execution time: 28_244_000 picoseconds.
+		Weight::from_parts(28_873_000, 0)
+			.saturating_add(Weight::from_parts(0, 6015))
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	/// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0)
 	/// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn notify_target_migration_fail() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `176`
-		//  Estimated: `13541`
-		// Minimum execution time: 18_242_000 picoseconds.
-		Weight::from_parts(18_636_000, 0)
-			.saturating_add(Weight::from_parts(0, 13541))
+		//  Measured:  `39`
+		//  Estimated: `13404`
+		// Minimum execution time: 17_457_000 picoseconds.
+		Weight::from_parts(18_023_000, 0)
+			.saturating_add(Weight::from_parts(0, 13404))
 			.saturating_add(T::DbWeight::get().reads(5))
 	}
 	/// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2)
 	/// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn migrate_version_notify_targets() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `170`
-		//  Estimated: `16010`
-		// Minimum execution time: 24_249_000 picoseconds.
-		Weight::from_parts(24_768_000, 0)
-			.saturating_add(Weight::from_parts(0, 16010))
+		//  Measured:  `33`
+		//  Estimated: `15873`
+		// Minimum execution time: 22_283_000 picoseconds.
+		Weight::from_parts(22_783_000, 0)
+			.saturating_add(Weight::from_parts(0, 15873))
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -326,23 +299,19 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	/// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	/// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	/// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	fn migrate_and_notify_old_targets() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `212`
-		//  Estimated: `16052`
-		// Minimum execution time: 47_602_000 picoseconds.
-		Weight::from_parts(48_378_000, 0)
-			.saturating_add(Weight::from_parts(0, 16052))
-			.saturating_add(T::DbWeight::get().reads(12))
-			.saturating_add(T::DbWeight::get().writes(4))
+		//  Measured:  `75`
+		//  Estimated: `15915`
+		// Minimum execution time: 41_244_000 picoseconds.
+		Weight::from_parts(42_264_000, 0)
+			.saturating_add(Weight::from_parts(0, 15915))
+			.saturating_add(T::DbWeight::get().reads(10))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	/// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1)
 	/// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
@@ -350,11 +319,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn new_query() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `103`
-		//  Estimated: `1588`
-		// Minimum execution time: 5_566_000 picoseconds.
-		Weight::from_parts(5_768_000, 0)
-			.saturating_add(Weight::from_parts(0, 1588))
+		//  Measured:  `0`
+		//  Estimated: `1485`
+		// Minimum execution time: 2_678_000 picoseconds.
+		Weight::from_parts(2_892_000, 0)
+			.saturating_add(Weight::from_parts(0, 1485))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -362,11 +331,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn take_response() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `7740`
-		//  Estimated: `11205`
-		// Minimum execution time: 30_821_000 picoseconds.
-		Weight::from_parts(31_250_000, 0)
-			.saturating_add(Weight::from_parts(0, 11205))
+		//  Measured:  `7576`
+		//  Estimated: `11041`
+		// Minimum execution time: 26_677_000 picoseconds.
+		Weight::from_parts(27_470_000, 0)
+			.saturating_add(Weight::from_parts(0, 11041))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -376,11 +345,11 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 	/// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	fn claim_assets() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `160`
-		//  Estimated: `3625`
-		// Minimum execution time: 43_463_000 picoseconds.
-		Weight::from_parts(44_960_000, 0)
-			.saturating_add(Weight::from_parts(0, 3625))
+		//  Measured:  `23`
+		//  Estimated: `3488`
+		// Minimum execution time: 40_143_000 picoseconds.
+		Weight::from_parts(41_712_000, 0)
+			.saturating_add(Weight::from_parts(0, 3488))
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 00826cbb8d7..f6a140f3157 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,26 +17,28 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-10-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-01-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-augrssgt-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `17a605d70d1a`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: Compiled, CHAIN: Some("collectives-westend-dev"), DB CACHE: 1024
 
 // Executed Command:
 // target/production/polkadot-parachain
 // benchmark
 // pallet
-// --steps=50
-// --repeat=20
 // --extrinsic=*
+// --chain=collectives-westend-dev
+// --pallet=pallet_xcm_benchmarks::fungible
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm
 // --wasm-execution=compiled
+// --steps=50
+// --repeat=20
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
-// --pallet=pallet_xcm_benchmarks::fungible
-// --chain=collectives-westend-dev
-// --header=./cumulus/file_header.txt
-// --template=./cumulus/templates/xcm-bench-template.hbs
-// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/
+// --template=cumulus/templates/xcm-bench-template.hbs
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -54,8 +56,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `101`
 		//  Estimated: `3593`
-		// Minimum execution time: 30_401_000 picoseconds.
-		Weight::from_parts(30_813_000, 3593)
+		// Minimum execution time: 32_692_000 picoseconds.
+		Weight::from_parts(33_469_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -63,33 +65,31 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	pub fn transfer_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `153`
+		//  Measured:  `101`
 		//  Estimated: `6196`
-		// Minimum execution time: 43_150_000 picoseconds.
-		Weight::from_parts(43_919_000, 6196)
+		// Minimum execution time: 42_464_000 picoseconds.
+		Weight::from_parts(43_897_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
-	// Storage: `System::Account` (r:2 w:2)
+	// Storage: `System::Account` (r:3 w:3)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn transfer_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `223`
-		//  Estimated: `6196`
-		// Minimum execution time: 67_808_000 picoseconds.
-		Weight::from_parts(69_114_000, 6196)
+		//  Measured:  `212`
+		//  Estimated: `8799`
+		// Minimum execution time: 105_472_000 picoseconds.
+		Weight::from_parts(115_465_000, 8799)
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -104,51 +104,49 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn initiate_reserve_withdraw() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `70`
-		//  Estimated: `3535`
-		// Minimum execution time: 29_312_000 picoseconds.
-		Weight::from_parts(30_347_000, 3535)
-			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(2))
+		//  Measured:  `212`
+		//  Estimated: `6196`
+		// Minimum execution time: 72_377_000 picoseconds.
+		Weight::from_parts(76_456_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	pub fn receive_teleported_asset() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_283_000 picoseconds.
-		Weight::from_parts(2_448_000, 0)
+		// Minimum execution time: 2_556_000 picoseconds.
+		Weight::from_parts(2_960_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	pub fn deposit_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `52`
+		//  Measured:  `0`
 		//  Estimated: `3593`
-		// Minimum execution time: 23_556_000 picoseconds.
-		Weight::from_parts(24_419_000, 3593)
+		// Minimum execution time: 24_560_000 picoseconds.
+		Weight::from_parts(24_926_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
@@ -157,54 +155,50 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn deposit_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `122`
+		//  Measured:  `111`
 		//  Estimated: `3593`
-		// Minimum execution time: 58_342_000 picoseconds.
-		Weight::from_parts(59_598_000, 3593)
-			.saturating_add(T::DbWeight::get().reads(7))
-			.saturating_add(T::DbWeight::get().writes(3))
+		// Minimum execution time: 57_780_000 picoseconds.
+		Weight::from_parts(59_561_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn initiate_teleport() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `70`
-		//  Estimated: `3535`
-		// Minimum execution time: 28_285_000 picoseconds.
-		Weight::from_parts(29_016_000, 3535)
-			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(2))
+		//  Measured:  `111`
+		//  Estimated: `3576`
+		// Minimum execution time: 37_041_000 picoseconds.
+		Weight::from_parts(38_101_000, 3576)
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	// Storage: `System::Account` (r:1 w:1)
+	// Storage: `System::Account` (r:2 w:2)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn initiate_transfer() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `122`
-		//  Estimated: `3593`
-		// Minimum execution time: 65_211_000 picoseconds.
-		Weight::from_parts(67_200_000, 3593)
+		//  Measured:  `111`
+		//  Estimated: `6196`
+		// Minimum execution time: 87_635_000 picoseconds.
+		Weight::from_parts(89_712_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
index ae94edc3d73..8e732546437 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -17,26 +17,28 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::generic`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `96ae15bb1012`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: Compiled, CHAIN: Some("collectives-westend-dev"), DB CACHE: 1024
 
 // Executed Command:
 // target/production/polkadot-parachain
 // benchmark
 // pallet
-// --steps=50
-// --repeat=20
 // --extrinsic=*
+// --chain=collectives-westend-dev
+// --pallet=pallet_xcm_benchmarks::generic
+// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm
 // --wasm-execution=compiled
+// --steps=50
+// --repeat=20
 // --heap-pages=4096
-// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
-// --pallet=pallet_xcm_benchmarks::generic
-// --chain=collectives-westend-dev
-// --header=./cumulus/file_header.txt
-// --template=./cumulus/templates/xcm-bench-template.hbs
-// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/
+// --template=cumulus/templates/xcm-bench-template.hbs
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -50,127 +52,145 @@ pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo<T> {
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn report_holding() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `70`
-		//  Estimated: `3535`
-		// Minimum execution time: 29_015_000 picoseconds.
-		Weight::from_parts(30_359_000, 3535)
-			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(2))
+		//  Measured:  `212`
+		//  Estimated: `6196`
+		// Minimum execution time: 72_839_000 picoseconds.
+		Weight::from_parts(74_957_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	pub fn buy_execution() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 572_000 picoseconds.
-		Weight::from_parts(637_000, 0)
+		// Minimum execution time: 592_000 picoseconds.
+		Weight::from_parts(646_000, 0)
 	}
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	pub fn pay_fees() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `3593`
+		// Minimum execution time: 3_630_000 picoseconds.
+		Weight::from_parts(3_843_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(1))
+			.saturating_add(T::DbWeight::get().writes(1))
+	}
+	pub fn asset_claimer() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 1_550_000 picoseconds.
-		Weight::from_parts(1_604_000, 0)
+		// Minimum execution time: 660_000 picoseconds.
+		Weight::from_parts(712_000, 0)
 	}
 	// Storage: `PolkadotXcm::Queries` (r:1 w:0)
 	// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub fn query_response() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `32`
-		//  Estimated: `3497`
-		// Minimum execution time: 7_354_000 picoseconds.
-		Weight::from_parts(7_808_000, 3497)
+		//  Measured:  `0`
+		//  Estimated: `3465`
+		// Minimum execution time: 5_996_000 picoseconds.
+		Weight::from_parts(6_277_000, 3465)
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	pub fn transact() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 6_716_000 picoseconds.
-		Weight::from_parts(7_067_000, 0)
+		// Minimum execution time: 7_427_000 picoseconds.
+		Weight::from_parts(7_817_000, 0)
 	}
 	pub fn refund_surplus() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 1_280_000 picoseconds.
-		Weight::from_parts(1_355_000, 0)
+		// Minimum execution time: 1_245_000 picoseconds.
+		Weight::from_parts(1_373_000, 0)
 	}
 	pub fn set_error_handler() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 587_000 picoseconds.
-		Weight::from_parts(645_000, 0)
+		// Minimum execution time: 589_000 picoseconds.
+		Weight::from_parts(647_000, 0)
 	}
 	pub fn set_appendix() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 629_000 picoseconds.
-		Weight::from_parts(662_000, 0)
+		// Minimum execution time: 593_000 picoseconds.
+		Weight::from_parts(653_000, 0)
 	}
 	pub fn clear_error() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 590_000 picoseconds.
-		Weight::from_parts(639_000, 0)
+		// Minimum execution time: 599_000 picoseconds.
+		Weight::from_parts(652_000, 0)
 	}
 	pub fn descend_origin() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 651_000 picoseconds.
-		Weight::from_parts(688_000, 0)
+		// Minimum execution time: 620_000 picoseconds.
+		Weight::from_parts(670_000, 0)
+	}
+	pub fn execute_with_origin() -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `0`
+		//  Estimated: `0`
+		// Minimum execution time: 682_000 picoseconds.
+		Weight::from_parts(747_000, 0)
 	}
 	pub fn clear_origin() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 601_000 picoseconds.
-		Weight::from_parts(630_000, 0)
+		// Minimum execution time: 596_000 picoseconds.
+		Weight::from_parts(650_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn report_error() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `70`
-		//  Estimated: `3535`
-		// Minimum execution time: 25_650_000 picoseconds.
-		Weight::from_parts(26_440_000, 3535)
-			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(2))
+		//  Measured:  `212`
+		//  Estimated: `6196`
+		// Minimum execution time: 68_183_000 picoseconds.
+		Weight::from_parts(70_042_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1)
 	// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub fn claim_asset() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `90`
-		//  Estimated: `3555`
-		// Minimum execution time: 10_492_000 picoseconds.
-		Weight::from_parts(10_875_000, 3555)
+		//  Measured:  `23`
+		//  Estimated: `3488`
+		// Minimum execution time: 9_661_000 picoseconds.
+		Weight::from_parts(9_943_000, 3488)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -178,29 +198,27 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 597_000 picoseconds.
-		Weight::from_parts(647_000, 0)
+		// Minimum execution time: 580_000 picoseconds.
+		Weight::from_parts(652_000, 0)
 	}
 	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1)
 	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn subscribe_version() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `38`
-		//  Estimated: `3503`
-		// Minimum execution time: 23_732_000 picoseconds.
-		Weight::from_parts(24_290_000, 3503)
-			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(3))
+		//  Measured:  `42`
+		//  Estimated: `3507`
+		// Minimum execution time: 24_197_000 picoseconds.
+		Weight::from_parts(25_199_000, 3507)
+			.saturating_add(T::DbWeight::get().reads(5))
+			.saturating_add(T::DbWeight::get().writes(2))
 	}
 	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1)
 	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -208,148 +226,134 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 2_446_000 picoseconds.
-		Weight::from_parts(2_613_000, 0)
+		// Minimum execution time: 2_720_000 picoseconds.
+		Weight::from_parts(2_881_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
 	pub fn burn_asset() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 960_000 picoseconds.
-		Weight::from_parts(1_045_000, 0)
+		// Minimum execution time: 950_000 picoseconds.
+		Weight::from_parts(1_076_000, 0)
 	}
 	pub fn expect_asset() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 703_000 picoseconds.
-		Weight::from_parts(739_000, 0)
+		// Minimum execution time: 742_000 picoseconds.
+		Weight::from_parts(785_000, 0)
 	}
 	pub fn expect_origin() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 616_000 picoseconds.
-		Weight::from_parts(651_000, 0)
+		// Minimum execution time: 598_000 picoseconds.
+		Weight::from_parts(671_000, 0)
 	}
 	pub fn expect_error() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 621_000 picoseconds.
-		Weight::from_parts(660_000, 0)
+		// Minimum execution time: 571_000 picoseconds.
+		Weight::from_parts(635_000, 0)
 	}
 	pub fn expect_transact_status() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 794_000 picoseconds.
-		Weight::from_parts(831_000, 0)
+		// Minimum execution time: 766_000 picoseconds.
+		Weight::from_parts(835_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn query_pallet() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `70`
-		//  Estimated: `3535`
-		// Minimum execution time: 29_527_000 picoseconds.
-		Weight::from_parts(30_614_000, 3535)
-			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(2))
+		//  Measured:  `212`
+		//  Estimated: `6196`
+		// Minimum execution time: 76_301_000 picoseconds.
+		Weight::from_parts(79_269_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	pub fn expect_pallet() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 3_189_000 picoseconds.
-		Weight::from_parts(3_296_000, 0)
+		// Minimum execution time: 5_452_000 picoseconds.
+		Weight::from_parts(5_721_000, 0)
 	}
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
-	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
-	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
-	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn report_transact_status() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `70`
-		//  Estimated: `3535`
-		// Minimum execution time: 25_965_000 picoseconds.
-		Weight::from_parts(26_468_000, 3535)
-			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(2))
+		//  Measured:  `212`
+		//  Estimated: `6196`
+		// Minimum execution time: 68_763_000 picoseconds.
+		Weight::from_parts(71_142_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(7))
+			.saturating_add(T::DbWeight::get().writes(3))
 	}
 	pub fn clear_transact_status() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 618_000 picoseconds.
-		Weight::from_parts(659_000, 0)
+		// Minimum execution time: 630_000 picoseconds.
+		Weight::from_parts(676_000, 0)
 	}
 	pub fn set_topic() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 593_000 picoseconds.
-		Weight::from_parts(618_000, 0)
+		// Minimum execution time: 570_000 picoseconds.
+		Weight::from_parts(622_000, 0)
 	}
 	pub fn clear_topic() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 603_000 picoseconds.
-		Weight::from_parts(634_000, 0)
-	}
-	pub fn alias_origin() -> Weight {
-		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 2_000_000 picoseconds.
-		Weight::from_parts(2_000_000, 0)
+		// Minimum execution time: 549_000 picoseconds.
+		Weight::from_parts(603_000, 0)
 	}
 	pub fn set_fees_mode() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 568_000 picoseconds.
-		Weight::from_parts(629_000, 0)
+		// Minimum execution time: 578_000 picoseconds.
+		Weight::from_parts(626_000, 0)
 	}
 	pub fn unpaid_execution() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 598_000 picoseconds.
-		Weight::from_parts(655_000, 0)
-	}
-	pub fn asset_claimer() -> Weight {
-		// Proof Size summary in bytes:
-		//  Measured:  `0`
-		//  Estimated: `0`
-		// Minimum execution time: 707_000 picoseconds.
-		Weight::from_parts(749_000, 0)
+		// Minimum execution time: 594_000 picoseconds.
+		Weight::from_parts(639_000, 0)
 	}
-	pub fn execute_with_origin() -> Weight {
+	pub fn alias_origin() -> Weight {
 		// Proof Size summary in bytes:
 		//  Measured:  `0`
 		//  Estimated: `0`
-		// Minimum execution time: 713_000 picoseconds.
-		Weight::from_parts(776_000, 0)
+		// Minimum execution time: 637_000 picoseconds.
+		Weight::from_parts(676_000, 0)
 	}
 }
-- 
GitLab


From a2c63e8d8a512eca28ed24c3c58ea7609c28b9ee Mon Sep 17 00:00:00 2001
From: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
Date: Fri, 24 Jan 2025 15:29:25 +0200
Subject: [PATCH 110/116] fix(cmd bench-omni): build omni-bencher with
 production profile (#7299)

# Description

This PR builds frame-omni-bencher with `production` profile when calling
`/cmd bench-omni` to compute benchmarks for pallets.
Fix proposed by @bkchr, thanks!

Closes #6797.

## Integration

N/A

## Review Notes

More info on #6797, and related to how the fix was tested:
https://github.com/paritytech/polkadot-sdk/issues/6797#issuecomment-2611903102.

---------

Signed-off-by: Iulian Barbu <iulian.barbu@parity.io>
Co-authored-by: command-bot <>
---
 .github/workflows/cmd.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml
index 3d4779064a4..247fc34f1b1 100644
--- a/.github/workflows/cmd.yml
+++ b/.github/workflows/cmd.yml
@@ -348,7 +348,7 @@ jobs:
       - name: Install dependencies for bench
         if: startsWith(needs.get-pr-info.outputs.CMD, 'bench')
         run: |
-          cargo install --path substrate/utils/frame/omni-bencher --locked
+          cargo install --path substrate/utils/frame/omni-bencher --locked --profile production
 
       - name: Run cmd
         id: cmd
-- 
GitLab


From 7710483541ce273df892c77a6e300aaa2efa1dca Mon Sep 17 00:00:00 2001
From: Branislav Kontur <bkontur@gmail.com>
Date: Fri, 24 Jan 2025 16:05:36 +0100
Subject: [PATCH 111/116] Bridges: emulated tests small nits/improvements
 (#7322)

This PR includes minor fixes identified during work on the larger PR:
[https://github.com/paritytech/polkadot-sdk/issues/6906](https://github.com/paritytech/polkadot-sdk/issues/6906).

Specifically, this PR removes the use of
`open_bridge_between_asset_hub_rococo_and_asset_hub_westend`, which is
no longer relevant for BridgeHubs, as bridges are now created with
genesis settings. This function was used in the generic
`test_dry_run_transfer_across_pk_bridge` macro, which could cause
compilation issues when used in other contexts (e.g. fellows repo).

---------

Co-authored-by: cmd[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
 .../emulated/common/src/macros.rs             |  3 +-
 .../src/tests/asset_transfers.rs              |  3 --
 .../bridge-hub-rococo/src/tests/mod.rs        | 43 -------------------
 .../bridge-hub-rococo/src/tests/send_xcm.rs   |  3 --
 .../src/tests/asset_transfers.rs              |  3 --
 .../bridge-hub-westend/src/tests/mod.rs       | 43 -------------------
 .../bridge-hub-westend/src/tests/send_xcm.rs  |  3 --
 prdoc/pr_7322.prdoc                           |  8 ++++
 8 files changed, 9 insertions(+), 100 deletions(-)
 create mode 100644 prdoc/pr_7322.prdoc

diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs
index cd2b41e5198..983ac626177 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs
@@ -644,9 +644,8 @@ macro_rules! test_dry_run_transfer_across_pk_bridge {
 			let transfer_amount = 10_000_000_000_000u128;
 			let initial_balance = transfer_amount * 10;
 
-			// Bridge setup.
+			// AssetHub setup.
 			$sender_asset_hub::force_xcm_version($destination, XCM_VERSION);
-			open_bridge_between_asset_hub_rococo_and_asset_hub_westend();
 
 			<$sender_asset_hub as TestExt>::execute_with(|| {
 				type Runtime = <$sender_asset_hub as Chain>::Runtime;
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs
index a2a61660aff..d1fe94962f1 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs
@@ -25,9 +25,6 @@ fn send_assets_over_bridge<F: FnOnce()>(send_fn: F) {
 	AssetHubRococo::force_xcm_version(asset_hub_westend_location(), XCM_VERSION);
 	BridgeHubRococo::force_xcm_version(bridge_hub_westend_location(), XCM_VERSION);
 
-	// open bridge
-	open_bridge_between_asset_hub_rococo_and_asset_hub_westend();
-
 	// send message over bridge
 	send_fn();
 
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs
index 8aff8775596..265002897ac 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs
@@ -51,9 +51,6 @@ pub(crate) fn bridged_roc_at_ah_westend() -> Location {
 }
 
 // WND and wWND
-pub(crate) fn wnd_at_ah_westend() -> Location {
-	Parent.into()
-}
 pub(crate) fn bridged_wnd_at_ah_rococo() -> Location {
 	Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])
 }
@@ -240,43 +237,3 @@ pub(crate) fn assert_bridge_hub_westend_message_received() {
 		);
 	})
 }
-
-pub(crate) fn open_bridge_between_asset_hub_rococo_and_asset_hub_westend() {
-	use testnet_parachains_constants::{
-		rococo::currency::UNITS as ROC, westend::currency::UNITS as WND,
-	};
-
-	// open AHR -> AHW
-	BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), ROC * 5);
-	AssetHubRococo::open_bridge(
-		AssetHubRococo::sibling_location_of(BridgeHubRococo::para_id()),
-		[
-			GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),
-			Parachain(AssetHubWestend::para_id().into()),
-		]
-		.into(),
-		Some((
-			(roc_at_ah_rococo(), ROC * 1).into(),
-			BridgeHubRococo::sovereign_account_id_of(BridgeHubRococo::sibling_location_of(
-				AssetHubRococo::para_id(),
-			)),
-		)),
-	);
-
-	// open AHW -> AHR
-	BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), WND * 5);
-	AssetHubWestend::open_bridge(
-		AssetHubWestend::sibling_location_of(BridgeHubWestend::para_id()),
-		[
-			GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
-			Parachain(AssetHubRococo::para_id().into()),
-		]
-		.into(),
-		Some((
-			(wnd_at_ah_westend(), WND * 1).into(),
-			BridgeHubWestend::sovereign_account_id_of(BridgeHubWestend::sibling_location_of(
-				AssetHubWestend::para_id(),
-			)),
-		)),
-	);
-}
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs
index cfcb581238e..799af037869 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs
@@ -74,9 +74,6 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() {
 	// fund sender
 	AssetHubRococo::fund_accounts(vec![(AssetHubRococoSender::get().into(), amount * 10)]);
 
-	// open bridge
-	open_bridge_between_asset_hub_rococo_and_asset_hub_westend();
-
 	// Initially set only default version on all runtimes
 	let newer_xcm_version = xcm::prelude::XCM_VERSION;
 	let older_xcm_version = newer_xcm_version - 1;
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
index cc90c10b54b..a73c1280b40 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
@@ -26,9 +26,6 @@ fn send_assets_over_bridge<F: FnOnce()>(send_fn: F) {
 	AssetHubWestend::force_xcm_version(asset_hub_rococo_location(), XCM_VERSION);
 	BridgeHubWestend::force_xcm_version(bridge_hub_rococo_location(), XCM_VERSION);
 
-	// open bridge
-	open_bridge_between_asset_hub_rococo_and_asset_hub_westend();
-
 	// send message over bridge
 	send_fn();
 
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs
index 6c1cdb98e8b..676b2862e66 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs
@@ -52,9 +52,6 @@ pub(crate) fn bridged_wnd_at_ah_rococo() -> Location {
 }
 
 // ROC and wROC
-pub(crate) fn roc_at_ah_rococo() -> Location {
-	Parent.into()
-}
 pub(crate) fn bridged_roc_at_ah_westend() -> Location {
 	Location::new(2, [GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH))])
 }
@@ -250,43 +247,3 @@ pub(crate) fn assert_bridge_hub_rococo_message_received() {
 		);
 	})
 }
-
-pub(crate) fn open_bridge_between_asset_hub_rococo_and_asset_hub_westend() {
-	use testnet_parachains_constants::{
-		rococo::currency::UNITS as ROC, westend::currency::UNITS as WND,
-	};
-
-	// open AHR -> AHW
-	BridgeHubRococo::fund_para_sovereign(AssetHubRococo::para_id(), ROC * 5);
-	AssetHubRococo::open_bridge(
-		AssetHubRococo::sibling_location_of(BridgeHubRococo::para_id()),
-		[
-			GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH)),
-			Parachain(AssetHubWestend::para_id().into()),
-		]
-		.into(),
-		Some((
-			(roc_at_ah_rococo(), ROC * 1).into(),
-			BridgeHubRococo::sovereign_account_id_of(BridgeHubRococo::sibling_location_of(
-				AssetHubRococo::para_id(),
-			)),
-		)),
-	);
-
-	// open AHW -> AHR
-	BridgeHubWestend::fund_para_sovereign(AssetHubWestend::para_id(), WND * 5);
-	AssetHubWestend::open_bridge(
-		AssetHubWestend::sibling_location_of(BridgeHubWestend::para_id()),
-		[
-			GlobalConsensus(ByGenesis(ROCOCO_GENESIS_HASH)),
-			Parachain(AssetHubRococo::para_id().into()),
-		]
-		.into(),
-		Some((
-			(wnd_at_ah_westend(), WND * 1).into(),
-			BridgeHubWestend::sovereign_account_id_of(BridgeHubWestend::sibling_location_of(
-				AssetHubWestend::para_id(),
-			)),
-		)),
-	);
-}
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs
index 60f8af2242f..e655f06a0f0 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs
@@ -74,9 +74,6 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() {
 	// fund sender
 	AssetHubWestend::fund_accounts(vec![(AssetHubWestendSender::get().into(), amount * 10)]);
 
-	// open bridge
-	open_bridge_between_asset_hub_rococo_and_asset_hub_westend();
-
 	// Initially set only default version on all runtimes
 	let newer_xcm_version = xcm::prelude::XCM_VERSION;
 	let older_xcm_version = newer_xcm_version - 1;
diff --git a/prdoc/pr_7322.prdoc b/prdoc/pr_7322.prdoc
new file mode 100644
index 00000000000..72c566f7a81
--- /dev/null
+++ b/prdoc/pr_7322.prdoc
@@ -0,0 +1,8 @@
+title: 'Bridges: emulated tests small nits/improvements'
+doc:
+- audience: Runtime Dev
+  description: |-
+    This PR removes the use of `open_bridge_between_asset_hub_rococo_and_asset_hub_westend`. This function was used in the generic `test_dry_run_transfer_across_pk_bridge` macro, which could cause compilation issues when used in other contexts (e.g. fellows repo).
+crates:
+- name: emulated-integration-tests-common
+  bump: patch
-- 
GitLab


From ccd6337f1bfef8ff9da9020fefc25db5a6508da7 Mon Sep 17 00:00:00 2001
From: Iulian Barbu <14218860+iulianbarbu@users.noreply.github.com>
Date: Fri, 24 Jan 2025 18:29:17 +0200
Subject: [PATCH 112/116] sync-templates: enable syncing from stable release
 patches (#7227)

# Description

We're unable to sync templates repos with what's in
polkadot-sdk/templates for stable2412 because the tag which references
the release (`polkadot-stable2412`) is missing the Plan.toml file, which
is needed by PSVM, ran when syncing, to update the templates
dependencies versions in Cargo.tomls. This PR adds a workflow `patch`
input, to enable the workflow to use PSVM with a tag corresponding to a
patch stable release (e.g. `polkadot-stable2412-1`), which will contain
the `Plan.toml` file.

## Integration

This enables the templates repos update with the contents of latest
stable2412 release, in terms of polkadot-sdk/templates, which is
relevant for getting-started docs.

## Review Notes

This PR adds a `patch` input for the `misc-sync-templates.yml` workflow,
which if set will be used with `psvm` accordingly to update templates
repos' dependencies versions based on upcoming patch stable2412-1,
which contains the `Plan.toml`. The workflow will be ran manually after
stable2412-1 is out and this work is tracked under #6329.

Signed-off-by: Iulian Barbu <iulian.barbu@parity.io>
---
 .github/workflows/misc-sync-templates.yml | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml
index ac66e697562..ce01f010aa7 100644
--- a/.github/workflows/misc-sync-templates.yml
+++ b/.github/workflows/misc-sync-templates.yml
@@ -25,6 +25,10 @@ on:
         description: Enable runner debug logging
         required: false
         default: false
+      patch:
+        description: 'Patch number of the stable release we want to sync with'
+        required: false
+        default: ""
 
 jobs:
   sync-templates:
@@ -139,7 +143,14 @@ jobs:
           rm -f "${{ env.template-path }}/src/lib.rs"
 
       - name: Run psvm on monorepo workspace dependencies
-        run: psvm -o -v ${{ github.event.inputs.stable_release_branch }} -p ./Cargo.toml
+        run: |
+          patch_input="${{ github.event.inputs.patch }}"
+          if [[ -n "$patch_input" ]]; then
+            patch="-$patch_input"
+          else
+            patch=""
+          fi
+          psvm -o -v "${{ github.event.inputs.stable_release_branch }}$patch" -p ./Cargo.toml
         working-directory: polkadot-sdk/
       - name: Copy over required workspace dependencies
         run: |
-- 
GitLab


From 223bd28896cfa7ece1068c70da9f433a08da5554 Mon Sep 17 00:00:00 2001
From: PG Herveou <pgherveou@gmail.com>
Date: Fri, 24 Jan 2025 17:34:15 +0100
Subject: [PATCH 113/116] [pallet-revive] eth-rpc minor fixes (#7325)

- Add option to specify database_url using DATABASE_URL environment
variable
- Add a eth-rpc-tester rust bin that can be used to test deployment
before releasing eth-rpc
- make evm_block non-fallible so that it can return an Ok response for
older blocks when the runtime API is not available
- update cargo.lock to integrate changes from
https://github.com/paritytech/subxt/pull/1904

---------

Co-authored-by: cmd[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
 Cargo.lock                                    |  12 +-
 prdoc/pr_7325.prdoc                           |  11 ++
 substrate/frame/revive/rpc/Cargo.toml         |  28 ++--
 substrate/frame/revive/rpc/examples/README.md |   2 +-
 substrate/frame/revive/rpc/src/cli.rs         |   6 +-
 substrate/frame/revive/rpc/src/client.rs      |  11 +-
 substrate/frame/revive/rpc/src/eth-indexer.rs |   2 +-
 .../frame/revive/rpc/src/eth-rpc-tester.rs    | 157 ++++++++++++++++++
 substrate/frame/revive/rpc/src/example.rs     |   2 -
 substrate/frame/revive/rpc/src/lib.rs         |   4 +-
 10 files changed, 198 insertions(+), 37 deletions(-)
 create mode 100644 prdoc/pr_7325.prdoc
 create mode 100644 substrate/frame/revive/rpc/src/eth-rpc-tester.rs

diff --git a/Cargo.lock b/Cargo.lock
index df2c58b7f4c..e4bd817300f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8735,7 +8735,7 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite",
- "socket2 0.5.7",
+ "socket2 0.4.9",
  "tokio",
  "tower-service",
  "tracing",
@@ -16456,7 +16456,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9"
 dependencies = [
  "bitcoin_hashes 0.13.0",
  "rand",
- "rand_core 0.6.4",
+ "rand_core 0.5.1",
  "serde",
  "unicode-normalization",
 ]
@@ -20721,7 +20721,7 @@ checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302"
 dependencies = [
  "bytes",
  "heck 0.5.0",
- "itertools 0.13.0",
+ "itertools 0.12.1",
  "log",
  "multimap",
  "once_cell",
@@ -20767,7 +20767,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac"
 dependencies = [
  "anyhow",
- "itertools 0.13.0",
+ "itertools 0.12.1",
  "proc-macro2 1.0.86",
  "quote 1.0.37",
  "syn 2.0.87",
@@ -29085,9 +29085,9 @@ dependencies = [
 
 [[package]]
 name = "subxt"
-version = "0.38.0"
+version = "0.38.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c53029d133e4e0cb7933f1fe06f2c68804b956de9bb8fa930ffca44e9e5e4230"
+checksum = "1c17d7ec2359d33133b63c97e28c8b7cd3f0a5bc6ce567ae3aef9d9e85be3433"
 dependencies = [
  "async-trait",
  "derive-where",
diff --git a/prdoc/pr_7325.prdoc b/prdoc/pr_7325.prdoc
new file mode 100644
index 00000000000..788f01cb324
--- /dev/null
+++ b/prdoc/pr_7325.prdoc
@@ -0,0 +1,11 @@
+title: '[pallet-revive] eth-rpc minor fixes'
+doc:
+- audience: Runtime Dev
+  description: |-
+    - Add option to specify database_url from an environment variable
+    - Add an eth-rpc-tester rust binary that can be used to test deployment and call of a contract before releasing eth-rpc
+    - Make evm_block non fallible so that it can return an Ok response for older blocks when the runtime API is not available
+    - Update subxt version to integrate changes from https://github.com/paritytech/subxt/pull/1904
+crates:
+- name: pallet-revive-eth-rpc
+  bump: minor
diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml
index 9d822f5ff8e..014231f7f3e 100644
--- a/substrate/frame/revive/rpc/Cargo.toml
+++ b/substrate/frame/revive/rpc/Cargo.toml
@@ -17,34 +17,33 @@ path = "src/main.rs"
 name = "eth-indexer"
 path = "src/eth-indexer.rs"
 
+[[bin]]
+name = "eth-rpc-tester"
+path = "src/eth-rpc-tester.rs"
+
 [[example]]
 name = "deploy"
 path = "examples/rust/deploy.rs"
-required-features = ["example"]
 
 [[example]]
 name = "transfer"
 path = "examples/rust/transfer.rs"
-required-features = ["example"]
 
 [[example]]
 name = "rpc-playground"
 path = "examples/rust/rpc-playground.rs"
-required-features = ["example"]
 
 [[example]]
 name = "extrinsic"
 path = "examples/rust/extrinsic.rs"
-required-features = ["example"]
 
 [[example]]
 name = "remark-extrinsic"
 path = "examples/rust/remark-extrinsic.rs"
-required-features = ["example"]
 
 [dependencies]
 anyhow = { workspace = true }
-clap = { workspace = true, features = ["derive"] }
+clap = { workspace = true, features = ["derive", "env"] }
 codec = { workspace = true, features = ["derive"] }
 ethabi = { version = "18.0.0" }
 futures = { workspace = true, features = ["thread-pool"] }
@@ -52,8 +51,9 @@ hex = { workspace = true }
 jsonrpsee = { workspace = true, features = ["full"] }
 log = { workspace = true }
 pallet-revive = { workspace = true, default-features = true }
+pallet-revive-fixtures = { workspace = true, default-features = true }
 prometheus-endpoint = { workspace = true, default-features = true }
-rlp = { workspace = true, optional = true }
+rlp = { workspace = true }
 sc-cli = { workspace = true, default-features = true }
 sc-rpc = { workspace = true, default-features = true }
 sc-rpc-api = { workspace = true, default-features = true }
@@ -62,24 +62,18 @@ sp-arithmetic = { workspace = true, default-features = true }
 sp-core = { workspace = true, default-features = true }
 sp-crypto-hashing = { workspace = true }
 sp-weights = { workspace = true, default-features = true }
-sqlx = { version = "0.8.2", features = [
-	"macros",
-	"runtime-tokio",
-	"sqlite",
+sqlx = { version = "0.8.2", features = ["macros", "runtime-tokio", "sqlite"] }
+subxt = { workspace = true, default-features = true, features = [
+	"reconnecting-rpc-client",
 ] }
-subxt = { workspace = true, default-features = true, features = ["reconnecting-rpc-client"] }
-subxt-signer = { workspace = true, optional = true, features = [
+subxt-signer = { workspace = true, features = [
 	"unstable-eth",
 ] }
 thiserror = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
 
-[features]
-example = ["rlp", "subxt-signer"]
-
 [dev-dependencies]
 env_logger = { workspace = true }
-pallet-revive-fixtures = { workspace = true, default-features = true }
 static_init = { workspace = true }
 substrate-cli-test-utils = { workspace = true }
 subxt-signer = { workspace = true, features = ["unstable-eth"] }
diff --git a/substrate/frame/revive/rpc/examples/README.md b/substrate/frame/revive/rpc/examples/README.md
index b9a2756b381..1079c254b9c 100644
--- a/substrate/frame/revive/rpc/examples/README.md
+++ b/substrate/frame/revive/rpc/examples/README.md
@@ -42,7 +42,7 @@ RUST_LOG="info,eth-rpc=debug" cargo run -p pallet-revive-eth-rpc -- --dev
 Run one of the examples from the `examples` directory to send a transaction to the node:
 
 ```bash
-RUST_LOG="info,eth-rpc=debug" cargo run -p pallet-revive-eth-rpc --features example --example deploy
+RUST_LOG="info,eth-rpc=debug" cargo run -p pallet-revive-eth-rpc --example deploy
 ```
 
 ## JS examples
diff --git a/substrate/frame/revive/rpc/src/cli.rs b/substrate/frame/revive/rpc/src/cli.rs
index d63d596ab7a..b6c57d2c3b0 100644
--- a/substrate/frame/revive/rpc/src/cli.rs
+++ b/substrate/frame/revive/rpc/src/cli.rs
@@ -19,7 +19,7 @@ use crate::{
 	client::{connect, Client},
 	BlockInfoProvider, BlockInfoProviderImpl, CacheReceiptProvider, DBReceiptProvider,
 	EthRpcServer, EthRpcServerImpl, ReceiptProvider, SystemHealthRpcServer,
-	SystemHealthRpcServerImpl,
+	SystemHealthRpcServerImpl, LOG_TARGET,
 };
 use clap::Parser;
 use futures::{pin_mut, FutureExt};
@@ -52,7 +52,7 @@ pub struct CliCommand {
 	/// The database used to store Ethereum transaction hashes.
 	/// This is only useful if the node needs to act as an archive node and respond to Ethereum RPC
 	/// queries for transactions that are not in the in memory cache.
-	#[clap(long)]
+	#[clap(long, env = "DATABASE_URL")]
 	pub database_url: Option<String>,
 
 	/// If true, we will only read from the database and not write to it.
@@ -148,6 +148,7 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> {
 				Arc::new(BlockInfoProviderImpl::new(cache_size, api.clone(), rpc.clone()));
 			let receipt_provider: Arc<dyn ReceiptProvider> =
 				if let Some(database_url) = database_url.as_ref() {
+					log::info!(target: LOG_TARGET, "🔗 Connecting to provided database");
 					Arc::new((
 						CacheReceiptProvider::default(),
 						DBReceiptProvider::new(
@@ -158,6 +159,7 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> {
 						.await?,
 					))
 				} else {
+					log::info!(target: LOG_TARGET, "🔌 No database provided, using in-memory cache");
 					Arc::new(CacheReceiptProvider::default())
 				};
 
diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs
index 440972c7a68..47e439f0685 100644
--- a/substrate/frame/revive/rpc/src/client.rs
+++ b/substrate/frame/revive/rpc/src/client.rs
@@ -646,9 +646,9 @@ impl Client {
 		&self,
 		block: Arc<SubstrateBlock>,
 		hydrated_transactions: bool,
-	) -> Result<Block, ClientError> {
+	) -> Block {
 		let runtime_api = self.api.runtime_api().at(block.hash());
-		let gas_limit = Self::block_gas_limit(&runtime_api).await?;
+		let gas_limit = Self::block_gas_limit(&runtime_api).await.unwrap_or_default();
 
 		let header = block.header();
 		let timestamp = extract_block_timestamp(&block).await.unwrap_or_default();
@@ -658,7 +658,7 @@ impl Client {
 		let state_root = header.state_root.0.into();
 		let extrinsics_root = header.extrinsics_root.0.into();
 
-		let receipts = extract_receipts_from_block(&block).await?;
+		let receipts = extract_receipts_from_block(&block).await.unwrap_or_default();
 		let gas_used =
 			receipts.iter().fold(U256::zero(), |acc, (_, receipt)| acc + receipt.gas_used);
 		let transactions = if hydrated_transactions {
@@ -675,7 +675,7 @@ impl Client {
 				.into()
 		};
 
-		Ok(Block {
+		Block {
 			hash: block.hash(),
 			parent_hash,
 			state_root,
@@ -689,7 +689,7 @@ impl Client {
 			receipts_root: extrinsics_root,
 			transactions,
 			..Default::default()
-		})
+		}
 	}
 
 	/// Convert a weight to a fee.
@@ -697,7 +697,6 @@ impl Client {
 		runtime_api: &subxt::runtime_api::RuntimeApi<SrcChainConfig, OnlineClient<SrcChainConfig>>,
 	) -> Result<U256, ClientError> {
 		let payload = subxt_client::apis().revive_api().block_gas_limit();
-
 		let gas_limit = runtime_api.call(payload).await?;
 		Ok(*gas_limit)
 	}
diff --git a/substrate/frame/revive/rpc/src/eth-indexer.rs b/substrate/frame/revive/rpc/src/eth-indexer.rs
index 3e7f6b6fa91..894143be0a5 100644
--- a/substrate/frame/revive/rpc/src/eth-indexer.rs
+++ b/substrate/frame/revive/rpc/src/eth-indexer.rs
@@ -37,7 +37,7 @@ pub struct CliCommand {
 	pub oldest_block: Option<SubstrateBlockNumber>,
 
 	/// The database used to store Ethereum transaction hashes.
-	#[clap(long)]
+	#[clap(long, env = "DATABASE_URL")]
 	pub database_url: String,
 
 	#[allow(missing_docs)]
diff --git a/substrate/frame/revive/rpc/src/eth-rpc-tester.rs b/substrate/frame/revive/rpc/src/eth-rpc-tester.rs
new file mode 100644
index 00000000000..0ddad6874df
--- /dev/null
+++ b/substrate/frame/revive/rpc/src/eth-rpc-tester.rs
@@ -0,0 +1,157 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use clap::Parser;
+use jsonrpsee::http_client::HttpClientBuilder;
+use pallet_revive::evm::{Account, BlockTag, ReceiptInfo};
+use pallet_revive_eth_rpc::{
+	example::{wait_for_receipt, TransactionBuilder},
+	EthRpcClient,
+};
+use tokio::{
+	io::{AsyncBufReadExt, BufReader},
+	process::{Child, ChildStderr, Command},
+	signal::unix::{signal, SignalKind},
+};
+
+const DOCKER_CONTAINER_NAME: &str = "eth-rpc-test";
+
+#[derive(Parser, Debug)]
+#[clap(author, about, version)]
+pub struct CliCommand {
+	/// The parity docker image e.g eth-rpc:master-fb2e414f
+	#[clap(long, default_value = "eth-rpc:master-fb2e414f")]
+	docker_image: String,
+
+	/// The docker binary
+	/// Either docker or podman
+	#[clap(long, default_value = "docker")]
+	docker_bin: String,
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+	let CliCommand { docker_bin, docker_image, .. } = CliCommand::parse();
+
+	let mut docker_process = start_docker(&docker_bin, &docker_image)?;
+	let stderr = docker_process.stderr.take().unwrap();
+
+	tokio::select! {
+		result = docker_process.wait() => {
+			println!("docker failed: {result:?}");
+		}
+		_ = interrupt() => {
+			kill_docker().await?;
+		}
+		_ = test_eth_rpc(stderr) => {
+			kill_docker().await?;
+		}
+	}
+
+	Ok(())
+}
+
+async fn interrupt() {
+	let mut sigint = signal(SignalKind::interrupt()).expect("failed to listen for SIGINT");
+	let mut sigterm = signal(SignalKind::terminate()).expect("failed to listen for SIGTERM");
+
+	tokio::select! {
+		_ = sigint.recv() => {},
+		_ = sigterm.recv() => {},
+	}
+}
+
+fn start_docker(docker_bin: &str, docker_image: &str) -> anyhow::Result<Child> {
+	let docker_process = Command::new(docker_bin)
+		.args([
+			"run",
+			"--name",
+			DOCKER_CONTAINER_NAME,
+			"--rm",
+			"-p",
+			"8545:8545",
+			&format!("docker.io/paritypr/{docker_image}"),
+			"--node-rpc-url",
+			"wss://westend-asset-hub-rpc.polkadot.io",
+			"--rpc-cors",
+			"all",
+			"--unsafe-rpc-external",
+			"--log=sc_rpc_server:info",
+		])
+		.stderr(std::process::Stdio::piped())
+		.kill_on_drop(true)
+		.spawn()?;
+
+	Ok(docker_process)
+}
+
+async fn kill_docker() -> anyhow::Result<()> {
+	Command::new("docker").args(["kill", DOCKER_CONTAINER_NAME]).output().await?;
+	Ok(())
+}
+
+async fn test_eth_rpc(stderr: ChildStderr) -> anyhow::Result<()> {
+	let mut reader = BufReader::new(stderr).lines();
+	while let Some(line) = reader.next_line().await? {
+		println!("{line}");
+		if line.contains("Running JSON-RPC server") {
+			break;
+		}
+	}
+
+	let account = Account::default();
+	let data = vec![];
+	let (bytes, _) = pallet_revive_fixtures::compile_module("dummy")?;
+	let input = bytes.into_iter().chain(data).collect::<Vec<u8>>();
+
+	println!("Account:");
+	println!("- address: {:?}", account.address());
+	let client = HttpClientBuilder::default().build("http://localhost:8545")?;
+
+	let nonce = client.get_transaction_count(account.address(), BlockTag::Latest.into()).await?;
+	let balance = client.get_balance(account.address(), BlockTag::Latest.into()).await?;
+	println!("-  nonce: {nonce:?}");
+	println!("-  balance: {balance:?}");
+
+	println!("\n\n=== Deploying dummy contract ===\n\n");
+	let hash = TransactionBuilder::default().input(input).send(&client).await?;
+
+	println!("Hash: {hash:?}");
+	println!("Waiting for receipt...");
+	let ReceiptInfo { block_number, gas_used, contract_address, .. } =
+		wait_for_receipt(&client, hash).await?;
+
+	let contract_address = contract_address.unwrap();
+	println!("\nReceipt:");
+	println!("Block explorer: https://westend-asset-hub-eth-explorer.parity.io/{hash:?}");
+	println!("- Block number: {block_number}");
+	println!("- Gas used: {gas_used}");
+	println!("- Address: {contract_address:?}");
+
+	println!("\n\n=== Calling dummy contract ===\n\n");
+	let hash = TransactionBuilder::default().to(contract_address).send(&client).await?;
+
+	println!("Hash: {hash:?}");
+	println!("Waiting for receipt...");
+
+	let ReceiptInfo { block_number, gas_used, to, .. } = wait_for_receipt(&client, hash).await?;
+	println!("\nReceipt:");
+	println!("Block explorer: https://westend-asset-hub-eth-explorer.parity.io/{hash:?}");
+	println!("- Block number: {block_number}");
+	println!("- Gas used: {gas_used}");
+	println!("- To: {to:?}");
+	Ok(())
+}
diff --git a/substrate/frame/revive/rpc/src/example.rs b/substrate/frame/revive/rpc/src/example.rs
index 3b9a33296ef..aad5b4fbc34 100644
--- a/substrate/frame/revive/rpc/src/example.rs
+++ b/substrate/frame/revive/rpc/src/example.rs
@@ -15,8 +15,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //! Example utilities
-#![cfg(any(feature = "example", test))]
-
 use crate::{EthRpcClient, ReceiptInfo};
 use anyhow::Context;
 use pallet_revive::evm::{
diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs
index 5e1341e2a29..fcf93fa6c0d 100644
--- a/substrate/frame/revive/rpc/src/lib.rs
+++ b/substrate/frame/revive/rpc/src/lib.rs
@@ -214,7 +214,7 @@ impl EthRpcServer for EthRpcServerImpl {
 		let Some(block) = self.client.block_by_hash(&block_hash).await? else {
 			return Ok(None);
 		};
-		let block = self.client.evm_block(block, hydrated_transactions).await?;
+		let block = self.client.evm_block(block, hydrated_transactions).await;
 		Ok(Some(block))
 	}
 
@@ -254,7 +254,7 @@ impl EthRpcServer for EthRpcServerImpl {
 		let Some(block) = self.client.block_by_number_or_tag(&block).await? else {
 			return Ok(None);
 		};
-		let block = self.client.evm_block(block, hydrated_transactions).await?;
+		let block = self.client.evm_block(block, hydrated_transactions).await;
 		Ok(Some(block))
 	}
 
-- 
GitLab


From dcbea60cc7a280f37986f2f815ec3fcff4758be5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= <alex.theissen@me.com>
Date: Fri, 24 Jan 2025 19:20:09 +0100
Subject: [PATCH 114/116] revive: Fix compilation of `uapi` crate when
 `unstable-hostfn` is not set (#7318)

This regression was introduced with some of the recent PRs. Regression
fixed and test added.

---------

Co-authored-by: cmd[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
 .github/workflows/build-misc.yml        | 28 +++++++++++++++++++++++++
 prdoc/pr_7318.prdoc                     |  8 +++++++
 substrate/frame/revive/uapi/src/host.rs | 26 +++++++++++------------
 3 files changed, 49 insertions(+), 13 deletions(-)
 create mode 100644 prdoc/pr_7318.prdoc

diff --git a/.github/workflows/build-misc.yml b/.github/workflows/build-misc.yml
index 335c2628202..e1ef29f305d 100644
--- a/.github/workflows/build-misc.yml
+++ b/.github/workflows/build-misc.yml
@@ -46,6 +46,34 @@ jobs:
           app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }}
           app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }}
 
+  # As part of our test fixtures we always build the revive-uapi crate with the `unstable-hostfn` feature.
+  # This check makes sure that the crate won't break for downstream users who do not set this feature.
+  # It doesn't need to produce working code, so we just use a similar enough RISC-V target.
+  check-revive-stable-uapi-polkavm:
+    timeout-minutes: 30
+    needs: [preflight]
+    runs-on: ${{ needs.preflight.outputs.RUNNER }}
+    container:
+      image: ${{ needs.preflight.outputs.IMAGE }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Check Rust
+        run: |
+          rustup show
+          rustup +nightly show
+
+      - name: Build
+        id: required
+        run: forklift cargo +nightly check -p pallet-revive-uapi --no-default-features --target riscv64imac-unknown-none-elf -Zbuild-std=core
+      - name: Stop all workflows if failed
+        if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }}
+        uses: ./.github/actions/workflow-stopper
+        with:
+          app-id: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_ID }}
+          app-key: ${{ secrets.WORKFLOW_STOPPER_RUNNER_APP_KEY }}
+
   build-subkey:
     timeout-minutes: 20
     needs: [preflight]
diff --git a/prdoc/pr_7318.prdoc b/prdoc/pr_7318.prdoc
new file mode 100644
index 00000000000..ec41b648a9c
--- /dev/null
+++ b/prdoc/pr_7318.prdoc
@@ -0,0 +1,8 @@
+title: 'revive: Fix compilation of `uapi` crate when `unstable-hostfn` is not set'
+doc:
+- audience: Runtime Dev
+  description: This regression was introduced with some of the recent PRs. Regression
+    fixed and test added.
+crates:
+- name: pallet-revive-uapi
+  bump: minor
diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs
index 3e5cf0eb0c2..130cbf97ad5 100644
--- a/substrate/frame/revive/uapi/src/host.rs
+++ b/substrate/frame/revive/uapi/src/host.rs
@@ -144,18 +144,6 @@ pub trait HostFn: private::Sealed {
 	/// - `output`: A reference to the output data buffer to write the origin's address.
 	fn origin(output: &mut [u8; 20]);
 
-	/// Retrieve the account id for a specified address.
-	///
-	/// # Parameters
-	///
-	/// - `addr`: A `H160` address.
-	/// - `output`: A reference to the output data buffer to write the account id.
-	///
-	/// # Note
-	///
-	/// If no mapping exists for `addr`, the fallback account id will be returned.
-	fn to_account_id(addr: &[u8; 20], output: &mut [u8]);
-
 	/// Retrieve the code hash for a specified contract address.
 	///
 	/// # Parameters
@@ -415,9 +403,21 @@ pub trait HostFn: private::Sealed {
 	/// # Parameters
 	///
 	/// - `output`: A reference to the output data buffer to write the block number.
-	#[unstable_hostfn]
 	fn block_number(output: &mut [u8; 32]);
 
+	/// Retrieve the account id for a specified address.
+	///
+	/// # Parameters
+	///
+	/// - `addr`: A `H160` address.
+	/// - `output`: A reference to the output data buffer to write the account id.
+	///
+	/// # Note
+	///
+	/// If no mapping exists for `addr`, the fallback account id will be returned.
+	#[unstable_hostfn]
+	fn to_account_id(addr: &[u8; 20], output: &mut [u8]);
+
 	/// Stores the block hash of the given block number into the supplied buffer.
 	///
 	/// # Parameters
-- 
GitLab


From a31d26dc30d90ca3b228d07fda8d3f94da6aa155 Mon Sep 17 00:00:00 2001
From: Andrei Eres <eresav@me.com>
Date: Fri, 24 Jan 2025 20:44:31 +0100
Subject: [PATCH 115/116] Fix the link to the chain snapshots (#7330)

The link to Polkachu is not working
---
 substrate/utils/frame/benchmarking-cli/src/storage/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/README.md b/substrate/utils/frame/benchmarking-cli/src/storage/README.md
index 95c83d2edbc..955b52a248c 100644
--- a/substrate/utils/frame/benchmarking-cli/src/storage/README.md
+++ b/substrate/utils/frame/benchmarking-cli/src/storage/README.md
@@ -13,7 +13,7 @@ Running the command on Substrate itself is not verify meaningful, since the gene
 used.
 
 The output for the Polkadot client with a recent chain snapshot will give you a better impression. A recent snapshot can
-be downloaded from [Polkachu].
+be downloaded from [Polkadot Snapshots].
 Then run (remove the `--db=paritydb` if you have a RocksDB snapshot):
 ```sh
 cargo run --profile=production -- benchmark storage --dev --state-version=0 --db=paritydb --weight-path runtime/polkadot/constants/src/weights
@@ -106,6 +106,6 @@ write: 71_347 * constants::WEIGHT_REF_TIME_PER_NANOS,
 License: Apache-2.0
 
 <!-- LINKS -->
-[Polkachu]: https://polkachu.com/snapshots
+[Polkadot Snapshots]: https://snapshots.polkadot.io
 [paritydb_weights.rs]:
     https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/paritydb_weights.rs#L60
-- 
GitLab


From 682f8cd22f5bcb76d1b98820b62be49d11deae10 Mon Sep 17 00:00:00 2001
From: Guillaume Thiolliere <gui.thiolliere@gmail.com>
Date: Sat, 25 Jan 2025 12:04:45 +0900
Subject: [PATCH 116/116] `set_validation_data` register weight manually, do
 not use refund when the pre dispatch is zero. (#7327)

Related https://github.com/paritytech/polkadot-sdk/issues/6772

For an extrinsic, in the post dispatch info, the actual weight is only
used to reclaim unused weight. If the actual weight is more than the pre
dispatch weight, then the extrinsic uses the minimum, i.e., the
weight registered in pre-dispatch.

In parachain-system pallet one call is `set_validation_data`. This call
is returning an actual weight, but the pre-dispatch weight is 0.

This PR fixes the disregard of the actual weight of `set_validation_data` by
registering it manually.
---
 cumulus/pallets/parachain-system/src/lib.rs | 14 ++++++++++----
 prdoc/pr_7327.prdoc                         | 11 +++++++++++
 2 files changed, 21 insertions(+), 4 deletions(-)
 create mode 100644 prdoc/pr_7327.prdoc

diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs
index 6857b08e66b..fa754ea29cc 100644
--- a/cumulus/pallets/parachain-system/src/lib.rs
+++ b/cumulus/pallets/parachain-system/src/lib.rs
@@ -45,7 +45,7 @@ use cumulus_primitives_core::{
 use cumulus_primitives_parachain_inherent::{MessageQueueChain, ParachainInherentData};
 use frame_support::{
 	defensive,
-	dispatch::{DispatchResult, Pays, PostDispatchInfo},
+	dispatch::DispatchResult,
 	ensure,
 	inherent::{InherentData, InherentIdentifier, ProvideInherent},
 	traits::{Get, HandleMessage},
@@ -567,11 +567,12 @@ pub mod pallet {
 		/// if the appropriate time has come.
 		#[pallet::call_index(0)]
 		#[pallet::weight((0, DispatchClass::Mandatory))]
-		// TODO: This weight should be corrected.
+		// TODO: This weight should be corrected. Currently the weight is registered manually in the
+		// call with `register_extra_weight_unchecked`.
 		pub fn set_validation_data(
 			origin: OriginFor<T>,
 			data: ParachainInherentData,
-		) -> DispatchResultWithPostInfo {
+		) -> DispatchResult {
 			ensure_none(origin)?;
 			assert!(
 				!<ValidationData<T>>::exists(),
@@ -692,7 +693,12 @@ pub mod pallet {
 				vfp.relay_parent_number,
 			));
 
-			Ok(PostDispatchInfo { actual_weight: Some(total_weight), pays_fee: Pays::No })
+			frame_system::Pallet::<T>::register_extra_weight_unchecked(
+				total_weight,
+				DispatchClass::Mandatory,
+			);
+
+			Ok(())
 		}
 
 		#[pallet::call_index(1)]
diff --git a/prdoc/pr_7327.prdoc b/prdoc/pr_7327.prdoc
new file mode 100644
index 00000000000..bb2d7a671af
--- /dev/null
+++ b/prdoc/pr_7327.prdoc
@@ -0,0 +1,11 @@
+title: Correctly register the weight of `set_validation_data` in `cumulus-pallet-parachain-system`
+
+doc:
+  - audience: Runtime Dev
+    description: |
+        The actual weight of the call was registered as a refund, but the pre-dispatch weight is 0,
+        and we can't refund from 0. Now the actual weight is registered manually instead of ignored.
+
+crates:
+  - name: cumulus-pallet-parachain-system
+    bump: patch
-- 
GitLab