diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock
index 4989de10ca47fc07fc04ab163274c539ebb7536b..6cca0a018ca72b31414a72edbe87635f3bfcd4b7 100644
--- a/substrate/Cargo.lock
+++ b/substrate/Cargo.lock
@@ -477,7 +477,7 @@ dependencies = [
  "ark-ff",
  "ark-std",
  "tracing",
- "tracing-subscriber 0.2.25",
+ "tracing-subscriber",
 ]
 
 [[package]]
@@ -2908,22 +2908,21 @@ name = "frame-remote-externalities"
 version = "0.10.0-dev"
 dependencies = [
  "async-recursion",
- "frame-support",
  "futures",
  "indicatif",
  "jsonrpsee",
  "log",
- "pallet-elections-phragmen",
  "parity-scale-codec",
  "serde",
  "sp-core",
  "sp-io",
  "sp-runtime",
+ "sp-state-machine",
+ "sp-tracing",
  "spinners",
  "substrate-rpc-client",
  "tokio",
  "tokio-retry",
- "tracing-subscriber 0.3.17",
 ]
 
 [[package]]
@@ -5071,15 +5070,6 @@ dependencies = [
  "regex-automata",
 ]
 
-[[package]]
-name = "matchers"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
-dependencies = [
- "regex-automata",
-]
-
 [[package]]
 name = "matches"
 version = "0.1.10"
@@ -5877,16 +5867,6 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
 
-[[package]]
-name = "nu-ansi-term"
-version = "0.46.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
-dependencies = [
- "overload",
- "winapi",
-]
-
 [[package]]
 name = "num"
 version = "0.4.0"
@@ -6065,12 +6045,6 @@ dependencies = [
  "winapi",
 ]
 
-[[package]]
-name = "overload"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
-
 [[package]]
 name = "p256"
 version = "0.11.1"
@@ -9751,7 +9725,7 @@ dependencies = [
  "substrate-test-runtime",
  "tempfile",
  "tracing",
- "tracing-subscriber 0.2.25",
+ "tracing-subscriber",
  "wat",
 ]
 
@@ -10449,7 +10423,7 @@ dependencies = [
  "thiserror",
  "tracing",
  "tracing-log",
- "tracing-subscriber 0.2.25",
+ "tracing-subscriber",
 ]
 
 [[package]]
@@ -11847,7 +11821,7 @@ dependencies = [
  "sp-std",
  "tracing",
  "tracing-core",
- "tracing-subscriber 0.2.25",
+ "tracing-subscriber",
 ]
 
 [[package]]
@@ -12941,7 +12915,7 @@ dependencies = [
  "ansi_term",
  "chrono",
  "lazy_static",
- "matchers 0.0.1",
+ "matchers",
  "parking_lot 0.11.2",
  "regex",
  "serde",
@@ -12955,24 +12929,6 @@ dependencies = [
  "tracing-serde",
 ]
 
-[[package]]
-name = "tracing-subscriber"
-version = "0.3.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77"
-dependencies = [
- "matchers 0.1.0",
- "nu-ansi-term",
- "once_cell",
- "regex",
- "sharded-slab",
- "smallvec",
- "thread_local",
- "tracing",
- "tracing-core",
- "tracing-log",
-]
-
 [[package]]
 name = "trie-bench"
 version = "0.37.0"
diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs
index 36be0b1a938bd0940be4ebab7ddfdb9ce33e2f4e..7303f5cd6dd6d2eb23131fb102ef11ae955f4806 100644
--- a/substrate/bin/node-template/node/src/service.rs
+++ b/substrate/bin/node-template/node/src/service.rs
@@ -49,7 +49,7 @@ pub fn new_partial(
 		FullClient,
 		FullBackend,
 		FullSelectChain,
-		sc_consensus::DefaultImportQueue<Block, FullClient>,
+		sc_consensus::DefaultImportQueue<Block>,
 		sc_transaction_pool::FullPool<Block, FullClient>,
 		(
 			sc_consensus_grandpa::GrandpaBlockImport<
diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs
index 5ee538d18d6a8846109eb37609dd825cc70456d9..b877aa7350228ca7b0d2a766ba9aba0e8b96b71c 100644
--- a/substrate/bin/node/cli/benches/block_production.rs
+++ b/substrate/bin/node/cli/benches/block_production.rs
@@ -112,13 +112,7 @@ fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic {
 	.into()
 }
 
-fn import_block(
-	mut client: &FullClient,
-	built: BuiltBlock<
-		node_primitives::Block,
-		<FullClient as sp_api::CallApiAt<node_primitives::Block>>::StateBackend,
-	>,
-) {
+fn import_block(mut client: &FullClient, built: BuiltBlock<node_primitives::Block>) {
 	let mut params = BlockImportParams::new(BlockOrigin::File, built.block.header);
 	params.state_action =
 		StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(built.storage_changes));
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index e857f96f0056a2747ef67052424d90c96bee7d56..ecca5c60db5156bbda32e012ed607db74e2d9fec 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -140,7 +140,7 @@ pub fn new_partial(
 		FullClient,
 		FullBackend,
 		FullSelectChain,
-		sc_consensus::DefaultImportQueue<Block, FullClient>,
+		sc_consensus::DefaultImportQueue<Block>,
 		sc_transaction_pool::FullPool<Block, FullClient>,
 		(
 			impl Fn(
diff --git a/substrate/bin/node/testing/src/client.rs b/substrate/bin/node/testing/src/client.rs
index 01495525a9c8c9e03b08c7e325a59140cb70336b..c55867360bd62c7e225f6fcddfb333f259bdb19f 100644
--- a/substrate/bin/node/testing/src/client.rs
+++ b/substrate/bin/node/testing/src/client.rs
@@ -36,9 +36,6 @@ pub type Client = client::Client<
 	kitchensink_runtime::RuntimeApi,
 >;
 
-/// Transaction for kitchensink-runtime.
-pub type Transaction = sc_client_api::backend::TransactionFor<Backend, node_primitives::Block>;
-
 /// Genesis configuration parameters for `TestClient`.
 #[derive(Default)]
 pub struct GenesisParameters;
diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs
index 465e1988478d70d051e5380c6a59be7f17d1f20b..2d8fdef77cdb9064076c3d38188d72bccedfdf9e 100644
--- a/substrate/client/api/src/backend.rs
+++ b/substrate/client/api/src/backend.rs
@@ -36,17 +36,11 @@ use sp_storage::{ChildInfo, StorageData, StorageKey};
 
 use crate::{blockchain::Backend as BlockchainBackend, UsageInfo};
 
-pub use sp_state_machine::{Backend as StateBackend, KeyValueStates};
+pub use sp_state_machine::{Backend as StateBackend, BackendTransaction, KeyValueStates};
 
 /// Extracts the state backend type for the given backend.
 pub type StateBackendFor<B, Block> = <B as Backend<Block>>::State;
 
-/// Extracts the transaction for the given state backend.
-pub type TransactionForSB<B, Block> = <B as StateBackend<HashingFor<Block>>>::Transaction;
-
-/// Extracts the transaction for the given backend.
-pub type TransactionFor<B, Block> = TransactionForSB<StateBackendFor<B, Block>, Block>;
-
 /// Describes which block import notification stream should be notified.
 #[derive(Debug, Clone, Copy)]
 pub enum ImportNotificationAction {
@@ -181,7 +175,7 @@ pub trait BlockImportOperation<Block: BlockT> {
 	/// Inject storage data into the database.
 	fn update_db_storage(
 		&mut self,
-		update: TransactionForSB<Self::State, Block>,
+		update: BackendTransaction<HashingFor<Block>>,
 	) -> sp_blockchain::Result<()>;
 
 	/// Set genesis state. If `commit` is `false` the state is saved in memory, but is not written
diff --git a/substrate/client/api/src/call_executor.rs b/substrate/client/api/src/call_executor.rs
index cecc1053d1b9dbbdd539651fa3a972673d4a9dc0..49b51ccc943edab005390a0726426f106e55b458 100644
--- a/substrate/client/api/src/call_executor.rs
+++ b/substrate/client/api/src/call_executor.rs
@@ -26,7 +26,7 @@ use sp_state_machine::{OverlayedChanges, StorageProof};
 use std::cell::RefCell;
 
 use crate::execution_extensions::ExecutionExtensions;
-use sp_api::{ProofRecorder, StorageTransactionCache};
+use sp_api::{HashingFor, ProofRecorder};
 
 /// Executor Provider
 pub trait ExecutorProvider<Block: BlockT> {
@@ -72,12 +72,7 @@ pub trait CallExecutor<B: BlockT>: RuntimeVersionOf {
 		at_hash: B::Hash,
 		method: &str,
 		call_data: &[u8],
-		changes: &RefCell<OverlayedChanges>,
-		storage_transaction_cache: Option<
-			&RefCell<
-				StorageTransactionCache<B, <Self::Backend as crate::backend::Backend<B>>::State>,
-			>,
-		>,
+		changes: &RefCell<OverlayedChanges<HashingFor<B>>>,
 		proof_recorder: &Option<ProofRecorder<B>>,
 		call_context: CallContext,
 		extensions: &RefCell<Extensions>,
diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs
index 711d739c16711e6b99aa8b798309651597f51aeb..807bdf0e334725eba64d99eb4edb88215436c073 100644
--- a/substrate/client/api/src/in_mem.rs
+++ b/substrate/client/api/src/in_mem.rs
@@ -29,8 +29,8 @@ use sp_runtime::{
 	Justification, Justifications, StateVersion, Storage,
 };
 use sp_state_machine::{
-	Backend as StateBackend, ChildStorageCollection, InMemoryBackend, IndexOperation,
-	StorageCollection,
+	Backend as StateBackend, BackendTransaction, ChildStorageCollection, InMemoryBackend,
+	IndexOperation, StorageCollection,
 };
 use std::{
 	collections::{HashMap, HashSet},
@@ -480,9 +480,7 @@ impl<Block: BlockT> backend::AuxStore for Blockchain<Block> {
 pub struct BlockImportOperation<Block: BlockT> {
 	pending_block: Option<PendingBlock<Block>>,
 	old_state: InMemoryBackend<HashingFor<Block>>,
-	new_state: Option<
-		<InMemoryBackend<HashingFor<Block>> as StateBackend<HashingFor<Block>>>::Transaction,
-	>,
+	new_state: Option<BackendTransaction<HashingFor<Block>>>,
 	aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
 	finalized_blocks: Vec<(Block::Hash, Option<Justification>)>,
 	set_head: Option<Block::Hash>,
@@ -540,7 +538,7 @@ impl<Block: BlockT> backend::BlockImportOperation<Block> for BlockImportOperatio
 
 	fn update_db_storage(
 		&mut self,
-		update: <InMemoryBackend<HashingFor<Block>> as StateBackend<HashingFor<Block>>>::Transaction,
+		update: BackendTransaction<HashingFor<Block>>,
 	) -> sp_blockchain::Result<()> {
 		self.new_state = Some(update);
 		Ok(())
diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs
index 55aadb07e6756030d726e1dc7472b379dc3840a6..b3a8f0d8970b6148a68abe9abbfa98080a84e4d3 100644
--- a/substrate/client/basic-authorship/src/basic_authorship.rs
+++ b/substrate/client/basic-authorship/src/basic_authorship.rs
@@ -187,8 +187,7 @@ where
 		+ Send
 		+ Sync
 		+ 'static,
-	C::Api:
-		ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>> + BlockBuilderApi<Block>,
+	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 {
 	fn init_with_now(
 		&mut self,
@@ -229,8 +228,7 @@ where
 		+ Send
 		+ Sync
 		+ 'static,
-	C::Api:
-		ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>> + BlockBuilderApi<Block>,
+	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 	PR: ProofRecording,
 {
 	type CreateProposer = future::Ready<Result<Self::Proposer, Self::Error>>;
@@ -269,17 +267,11 @@ where
 		+ Send
 		+ Sync
 		+ 'static,
-	C::Api:
-		ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>> + BlockBuilderApi<Block>,
+	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 	PR: ProofRecording,
 {
-	type Transaction = backend::TransactionFor<B, Block>;
-	type Proposal = Pin<
-		Box<
-			dyn Future<Output = Result<Proposal<Block, Self::Transaction, PR::Proof>, Self::Error>>
-				+ Send,
-		>,
-	>;
+	type Proposal =
+		Pin<Box<dyn Future<Output = Result<Proposal<Block, PR::Proof>, Self::Error>> + Send>>;
 	type Error = sp_blockchain::Error;
 	type ProofRecording = PR;
 	type Proof = PR::Proof;
@@ -332,8 +324,7 @@ where
 		+ Send
 		+ Sync
 		+ 'static,
-	C::Api:
-		ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>> + BlockBuilderApi<Block>,
+	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 	PR: ProofRecording,
 {
 	async fn propose_with(
@@ -342,8 +333,7 @@ where
 		inherent_digests: Digest,
 		deadline: time::Instant,
 		block_size_limit: Option<usize>,
-	) -> Result<Proposal<Block, backend::TransactionFor<B, Block>, PR::Proof>, sp_blockchain::Error>
-	{
+	) -> Result<Proposal<Block, PR::Proof>, sp_blockchain::Error> {
 		let propose_with_timer = time::Instant::now();
 		let mut block_builder =
 			self.client.new_block_at(self.parent_hash, inherent_digests, PR::ENABLED)?;
diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs
index 5d692c4bda18cd4fe3e5887ac066ffa3083956d5..1878e762748040c158fdbe7875af1eb205b6048b 100644
--- a/substrate/client/block-builder/src/lib.rs
+++ b/substrate/client/block-builder/src/lib.rs
@@ -85,20 +85,18 @@ impl From<bool> for RecordProof {
 /// backend to get the state of the block. Furthermore an optional `proof` is included which
 /// can be used to proof that the build block contains the expected data. The `proof` will
 /// only be set when proof recording was activated.
-pub struct BuiltBlock<Block: BlockT, StateBackend: backend::StateBackend<HashingFor<Block>>> {
+pub struct BuiltBlock<Block: BlockT> {
 	/// The actual block that was build.
 	pub block: Block,
 	/// The changes that need to be applied to the backend to get the state of the build block.
-	pub storage_changes: StorageChanges<StateBackend, Block>,
+	pub storage_changes: StorageChanges<Block>,
 	/// An optional proof that was recorded while building the block.
 	pub proof: Option<StorageProof>,
 }
 
-impl<Block: BlockT, StateBackend: backend::StateBackend<HashingFor<Block>>>
-	BuiltBlock<Block, StateBackend>
-{
+impl<Block: BlockT> BuiltBlock<Block> {
 	/// Convert into the inner values.
-	pub fn into_inner(self) -> (Block, StorageChanges<StateBackend, Block>, Option<StorageProof>) {
+	pub fn into_inner(self) -> (Block, StorageChanges<Block>, Option<StorageProof>) {
 		(self.block, self.storage_changes, self.proof)
 	}
 }
@@ -145,8 +143,7 @@ impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B>
 where
 	Block: BlockT,
 	A: ProvideRuntimeApi<Block> + 'a,
-	A::Api:
-		BlockBuilderApi<Block> + ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>>,
+	A::Api: BlockBuilderApi<Block> + ApiExt<Block>,
 	B: backend::Backend<Block>,
 {
 	/// Create a new instance of builder based on the given `parent_hash` and `parent_number`.
@@ -231,7 +228,7 @@ where
 	/// Returns the build `Block`, the changes to the storage and an optional `StorageProof`
 	/// supplied by `self.api`, combined as [`BuiltBlock`].
 	/// The storage proof will be `Some(_)` when proof recording was enabled.
-	pub fn build(mut self) -> Result<BuiltBlock<Block, backend::StateBackendFor<B, Block>>, Error> {
+	pub fn build(mut self) -> Result<BuiltBlock<Block>, Error> {
 		let header = self.api.finalize_block(self.parent_hash)?;
 
 		debug_assert_eq!(
diff --git a/substrate/client/consensus/aura/src/import_queue.rs b/substrate/client/consensus/aura/src/import_queue.rs
index ad9599d4b0243a4ffe420b2bc77428e759d26bbb..a8777ef8788cc3a247d6d09c146f61bf4cb23e62 100644
--- a/substrate/client/consensus/aura/src/import_queue.rs
+++ b/substrate/client/consensus/aura/src/import_queue.rs
@@ -175,8 +175,8 @@ where
 {
 	async fn verify(
 		&mut self,
-		mut block: BlockImportParams<B, ()>,
-	) -> Result<BlockImportParams<B, ()>, String> {
+		mut block: BlockImportParams<B>,
+	) -> Result<BlockImportParams<B>, String> {
 		// Skip checks that include execution, if being told so or when importing only state.
 		//
 		// This is done for example when gap syncing and it is expected that the block after the gap
@@ -348,7 +348,7 @@ pub fn import_queue<P, Block, I, C, S, CIDP>(
 		telemetry,
 		compatibility_mode,
 	}: ImportQueueParams<Block, I, C, S, CIDP>,
-) -> Result<DefaultImportQueue<Block, C>, sp_consensus::Error>
+) -> Result<DefaultImportQueue<Block>, sp_consensus::Error>
 where
 	Block: BlockT,
 	C::Api: BlockBuilderApi<Block> + AuraApi<Block, AuthorityId<P>> + ApiExt<Block>,
@@ -360,10 +360,7 @@ where
 		+ AuxStore
 		+ UsageProvider<Block>
 		+ HeaderBackend<Block>,
-	I: BlockImport<Block, Error = ConsensusError, Transaction = sp_api::TransactionFor<C, Block>>
-		+ Send
-		+ Sync
-		+ 'static,
+	I: BlockImport<Block, Error = ConsensusError> + Send + Sync + 'static,
 	P: Pair + 'static,
 	P::Public: Codec + Debug,
 	P::Signature: Codec,
diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs
index a611006716c696b5798e0db70f234fe5fd37376a..a77f00d08d3e0ae663d7ab3dbdf61f821209f751 100644
--- a/substrate/client/consensus/aura/src/lib.rs
+++ b/substrate/client/consensus/aura/src/lib.rs
@@ -178,9 +178,9 @@ where
 	C: ProvideRuntimeApi<B> + BlockOf + AuxStore + HeaderBackend<B> + Send + Sync,
 	C::Api: AuraApi<B, AuthorityId<P>>,
 	SC: SelectChain<B>,
-	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync + 'static,
+	I: BlockImport<B> + Send + Sync + 'static,
 	PF: Environment<B, Error = Error> + Send + Sync + 'static,
-	PF::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
+	PF::Proposer: Proposer<B, Error = Error>,
 	SO: SyncOracle + Send + Sync + Clone,
 	L: sc_consensus::JustificationSyncLink<B>,
 	CIDP: CreateInherentDataProviders<B, ()> + Send + 'static,
@@ -279,11 +279,11 @@ where
 	C: ProvideRuntimeApi<B> + BlockOf + AuxStore + HeaderBackend<B> + Send + Sync,
 	C::Api: AuraApi<B, AuthorityId<P>>,
 	PF: Environment<B, Error = Error> + Send + Sync + 'static,
-	PF::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
+	PF::Proposer: Proposer<B, Error = Error>,
 	P: Pair,
 	P::Public: AppPublic + Member,
 	P::Signature: TryFrom<Vec<u8>> + Member + Codec,
-	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync + 'static,
+	I: BlockImport<B> + Send + Sync + 'static,
 	Error: std::error::Error + Send + From<ConsensusError> + 'static,
 	SO: SyncOracle + Send + Sync + Clone,
 	L: sc_consensus::JustificationSyncLink<B>,
@@ -330,8 +330,8 @@ where
 	C: ProvideRuntimeApi<B> + BlockOf + HeaderBackend<B> + Sync,
 	C::Api: AuraApi<B, AuthorityId<P>>,
 	E: Environment<B, Error = Error> + Send + Sync,
-	E::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
-	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync + 'static,
+	E::Proposer: Proposer<B, Error = Error>,
+	I: BlockImport<B> + Send + Sync + 'static,
 	P: Pair,
 	P::Public: AppPublic + Member,
 	P::Signature: TryFrom<Vec<u8>> + Member + Codec,
@@ -388,13 +388,10 @@ where
 		header: B::Header,
 		header_hash: &B::Hash,
 		body: Vec<B::Extrinsic>,
-		storage_changes: StorageChanges<<Self::BlockImport as BlockImport<B>>::Transaction, B>,
+		storage_changes: StorageChanges<B>,
 		public: Self::Claim,
 		_authorities: Self::AuxData,
-	) -> Result<
-		sc_consensus::BlockImportParams<B, <Self::BlockImport as BlockImport<B>>::Transaction>,
-		ConsensusError,
-	> {
+	) -> Result<sc_consensus::BlockImportParams<B>, ConsensusError> {
 		let signature_digest_item =
 			crate::standalone::seal::<_, P>(header_hash, &public, &self.keystore)?;
 
@@ -596,9 +593,7 @@ mod tests {
 
 	impl Proposer<TestBlock> for DummyProposer {
 		type Error = Error;
-		type Transaction =
-			sc_client_api::TransactionFor<substrate_test_runtime_client::Backend, TestBlock>;
-		type Proposal = future::Ready<Result<Proposal<TestBlock, Self::Transaction, ()>, Error>>;
+		type Proposal = future::Ready<Result<Proposal<TestBlock, ()>, Error>>;
 		type ProofRecording = DisableProofRecording;
 		type Proof = ();
 
diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs
index 76bd670c200491a498a1123884b06bc3cce001c2..b89fa8f5df65e02807c59fde7a5e899ba97f44f8 100644
--- a/substrate/client/consensus/babe/src/lib.rs
+++ b/substrate/client/consensus/babe/src/lib.rs
@@ -492,11 +492,8 @@ where
 	C::Api: BabeApi<B>,
 	SC: SelectChain<B> + 'static,
 	E: Environment<B, Error = Error> + Send + Sync + 'static,
-	E::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
-	I: BlockImport<B, Error = ConsensusError, Transaction = sp_api::TransactionFor<C, B>>
-		+ Send
-		+ Sync
-		+ 'static,
+	E::Proposer: Proposer<B, Error = Error>,
+	I: BlockImport<B, Error = ConsensusError> + Send + Sync + 'static,
 	SO: SyncOracle + Send + Sync + Clone + 'static,
 	L: sc_consensus::JustificationSyncLink<B> + 'static,
 	CIDP: CreateInherentDataProviders<B, ()> + Send + Sync + 'static,
@@ -727,8 +724,8 @@ where
 	C: ProvideRuntimeApi<B> + HeaderBackend<B> + HeaderMetadata<B, Error = ClientError>,
 	C::Api: BabeApi<B>,
 	E: Environment<B, Error = Error> + Sync,
-	E::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
-	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync + 'static,
+	E::Proposer: Proposer<B, Error = Error>,
+	I: BlockImport<B> + Send + Sync + 'static,
 	SO: SyncOracle + Send + Clone + Sync,
 	L: sc_consensus::JustificationSyncLink<B>,
 	BS: BackoffAuthoringBlocksStrategy<NumberFor<B>> + Sync,
@@ -822,13 +819,10 @@ where
 		header: B::Header,
 		header_hash: &B::Hash,
 		body: Vec<B::Extrinsic>,
-		storage_changes: StorageChanges<<Self::BlockImport as BlockImport<B>>::Transaction, B>,
+		storage_changes: StorageChanges<B>,
 		(_, public): Self::Claim,
 		epoch_descriptor: Self::AuxData,
-	) -> Result<
-		BlockImportParams<B, <Self::BlockImport as BlockImport<B>>::Transaction>,
-		ConsensusError,
-	> {
+	) -> Result<BlockImportParams<B>, ConsensusError> {
 		let signature = self
 			.keystore
 			.sr25519_sign(<AuthorityId as AppCrypto>::ID, public.as_ref(), header_hash.as_ref())
@@ -1137,8 +1131,8 @@ where
 {
 	async fn verify(
 		&mut self,
-		mut block: BlockImportParams<Block, ()>,
-	) -> Result<BlockImportParams<Block, ()>, String> {
+		mut block: BlockImportParams<Block>,
+	) -> Result<BlockImportParams<Block>, String> {
 		trace!(
 			target: LOG_TARGET,
 			"Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}",
@@ -1336,7 +1330,7 @@ impl<Block: BlockT, Client, I> BabeBlockImport<Block, Client, I> {
 impl<Block, Client, Inner> BabeBlockImport<Block, Client, Inner>
 where
 	Block: BlockT,
-	Inner: BlockImport<Block, Transaction = sp_api::TransactionFor<Client, Block>> + Send + Sync,
+	Inner: BlockImport<Block> + Send + Sync,
 	Inner::Error: Into<ConsensusError>,
 	Client: HeaderBackend<Block>
 		+ HeaderMetadata<Block, Error = sp_blockchain::Error>
@@ -1351,7 +1345,7 @@ where
 	// end up in an inconsistent state and have to resync.
 	async fn import_state(
 		&mut self,
-		mut block: BlockImportParams<Block, sp_api::TransactionFor<Client, Block>>,
+		mut block: BlockImportParams<Block>,
 	) -> Result<ImportResult, ConsensusError> {
 		let hash = block.post_hash();
 		let parent_hash = *block.header.parent_hash();
@@ -1400,7 +1394,7 @@ where
 impl<Block, Client, Inner> BlockImport<Block> for BabeBlockImport<Block, Client, Inner>
 where
 	Block: BlockT,
-	Inner: BlockImport<Block, Transaction = sp_api::TransactionFor<Client, Block>> + Send + Sync,
+	Inner: BlockImport<Block> + Send + Sync,
 	Inner::Error: Into<ConsensusError>,
 	Client: HeaderBackend<Block>
 		+ HeaderMetadata<Block, Error = sp_blockchain::Error>
@@ -1411,11 +1405,10 @@ where
 	Client::Api: BabeApi<Block> + ApiExt<Block>,
 {
 	type Error = ConsensusError;
-	type Transaction = sp_api::TransactionFor<Client, Block>;
 
 	async fn import_block(
 		&mut self,
-		mut block: BlockImportParams<Block, Self::Transaction>,
+		mut block: BlockImportParams<Block>,
 	) -> Result<ImportResult, Self::Error> {
 		let hash = block.post_hash();
 		let number = *block.header.number();
@@ -1827,15 +1820,9 @@ pub fn import_queue<Block: BlockT, Client, SelectChain, BI, CIDP, Spawn>(
 		telemetry,
 		offchain_tx_pool_factory,
 	}: ImportQueueParams<'_, Block, BI, Client, CIDP, SelectChain, Spawn>,
-) -> ClientResult<(DefaultImportQueue<Block, Client>, BabeWorkerHandle<Block>)>
+) -> ClientResult<(DefaultImportQueue<Block>, BabeWorkerHandle<Block>)>
 where
-	BI: BlockImport<
-			Block,
-			Error = ConsensusError,
-			Transaction = sp_api::TransactionFor<Client, Block>,
-		> + Send
-		+ Sync
-		+ 'static,
+	BI: BlockImport<Block, Error = ConsensusError> + Send + Sync + 'static,
 	Client: ProvideRuntimeApi<Block>
 		+ HeaderBackend<Block>
 		+ HeaderMetadata<Block, Error = sp_blockchain::Error>
diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs
index 384e45228b5999bc841a5cb54d924c71e1230d4c..b3843f8acfa0a481ed456f13bdce5a7fb54173b4 100644
--- a/substrate/client/consensus/babe/src/tests.rs
+++ b/substrate/client/consensus/babe/src/tests.rs
@@ -21,7 +21,7 @@
 use super::*;
 use authorship::claim_slot;
 use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
-use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer};
+use sc_client_api::{BlockchainEvents, Finalizer};
 use sc_consensus::{BoxBlockImport, BoxJustificationImport};
 use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition};
 use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging;
@@ -97,16 +97,7 @@ impl DummyProposer {
 	fn propose_with(
 		&mut self,
 		pre_digests: Digest,
-	) -> future::Ready<
-		Result<
-			Proposal<
-				TestBlock,
-				sc_client_api::TransactionFor<substrate_test_runtime_client::Backend, TestBlock>,
-				(),
-			>,
-			Error,
-		>,
-	> {
+	) -> future::Ready<Result<Proposal<TestBlock, ()>, Error>> {
 		let block_builder =
 			self.factory.client.new_block_at(self.parent_hash, pre_digests, false).unwrap();
 
@@ -124,9 +115,7 @@ impl DummyProposer {
 
 impl Proposer<TestBlock> for DummyProposer {
 	type Error = Error;
-	type Transaction =
-		sc_client_api::TransactionFor<substrate_test_runtime_client::Backend, TestBlock>;
-	type Proposal = future::Ready<Result<Proposal<TestBlock, Self::Transaction, ()>, Error>>;
+	type Proposal = future::Ready<Result<Proposal<TestBlock, ()>, Error>>;
 	type ProofRecording = DisableProofRecording;
 	type Proof = ();
 
@@ -151,15 +140,13 @@ pub struct PanickingBlockImport<B>(B);
 #[async_trait::async_trait]
 impl<B: BlockImport<TestBlock>> BlockImport<TestBlock> for PanickingBlockImport<B>
 where
-	B::Transaction: Send,
 	B: Send,
 {
 	type Error = B::Error;
-	type Transaction = B::Transaction;
 
 	async fn import_block(
 		&mut self,
-		block: BlockImportParams<TestBlock, Self::Transaction>,
+		block: BlockImportParams<TestBlock>,
 	) -> Result<ImportResult, Self::Error> {
 		Ok(self.0.import_block(block).await.expect("importing block failed"))
 	}
@@ -207,8 +194,8 @@ impl Verifier<TestBlock> for TestVerifier {
 	/// presented to the User in the logs.
 	async fn verify(
 		&mut self,
-		mut block: BlockImportParams<TestBlock, ()>,
-	) -> Result<BlockImportParams<TestBlock, ()>, String> {
+		mut block: BlockImportParams<TestBlock>,
+	) -> Result<BlockImportParams<TestBlock>, String> {
 		// apply post-sealing mutations (i.e. stripping seal, if desired).
 		(self.mutator)(&mut block.header, Stage::PostSeal);
 		self.inner.verify(block).await
@@ -217,14 +204,7 @@ impl Verifier<TestBlock> for TestVerifier {
 
 pub struct PeerData {
 	link: BabeLink<TestBlock>,
-	block_import: Mutex<
-		Option<
-			BoxBlockImport<
-				TestBlock,
-				TransactionFor<substrate_test_runtime_client::Backend, TestBlock>,
-			>,
-		>,
-	>,
+	block_import: Mutex<Option<BoxBlockImport<TestBlock>>>,
 }
 
 impl TestNetFactory for BabeTestNet {
@@ -249,7 +229,7 @@ impl TestNetFactory for BabeTestNet {
 		let block_import = PanickingBlockImport(block_import);
 
 		let data_block_import =
-			Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>));
+			Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_>));
 		(
 			BlockImportAdapter::new(block_import),
 			None,
@@ -630,11 +610,11 @@ fn claim_vrf_check() {
 }
 
 // Propose and import a new BABE block on top of the given parent.
-async fn propose_and_import_block<Transaction: Send + 'static>(
+async fn propose_and_import_block(
 	parent: &TestHeader,
 	slot: Option<Slot>,
 	proposer_factory: &mut DummyFactory,
-	block_import: &mut BoxBlockImport<TestBlock, Transaction>,
+	block_import: &mut BoxBlockImport<TestBlock>,
 ) -> Hash {
 	let mut proposer = proposer_factory.init(parent).await.unwrap();
 
@@ -701,10 +681,10 @@ async fn propose_and_import_block<Transaction: Send + 'static>(
 // Propose and import n valid BABE blocks that are built on top of the given parent.
 // The proposer takes care of producing epoch change digests according to the epoch
 // duration (which is set to 6 slots in the test runtime).
-async fn propose_and_import_blocks<Transaction: Send + 'static>(
+async fn propose_and_import_blocks(
 	client: &PeersFullClient,
 	proposer_factory: &mut DummyFactory,
-	block_import: &mut BoxBlockImport<TestBlock, Transaction>,
+	block_import: &mut BoxBlockImport<TestBlock>,
 	parent_hash: Hash,
 	n: usize,
 ) -> Vec<Hash> {
diff --git a/substrate/client/consensus/beefy/src/import.rs b/substrate/client/consensus/beefy/src/import.rs
index 80f8cebe488800c146551664e691ba0649e6021b..5b2abb20acede2764502bfadb1948550c5c7a8b2 100644
--- a/substrate/client/consensus/beefy/src/import.rs
+++ b/substrate/client/consensus/beefy/src/import.rs
@@ -20,7 +20,7 @@ use std::sync::Arc;
 
 use log::debug;
 
-use sp_api::{ProvideRuntimeApi, TransactionFor};
+use sp_api::ProvideRuntimeApi;
 use sp_consensus::Error as ConsensusError;
 use sp_consensus_beefy::{ecdsa_crypto::AuthorityId, BeefyApi, BEEFY_ENGINE_ID};
 use sp_runtime::{
@@ -118,21 +118,15 @@ impl<Block, BE, Runtime, I> BlockImport<Block> for BeefyBlockImport<Block, BE, R
 where
 	Block: BlockT,
 	BE: Backend<Block>,
-	I: BlockImport<
-			Block,
-			Error = ConsensusError,
-			Transaction = sp_api::TransactionFor<Runtime, Block>,
-		> + Send
-		+ Sync,
+	I: BlockImport<Block, Error = ConsensusError> + Send + Sync,
 	Runtime: ProvideRuntimeApi<Block> + Send + Sync,
 	Runtime::Api: BeefyApi<Block, AuthorityId>,
 {
 	type Error = ConsensusError;
-	type Transaction = TransactionFor<Runtime, Block>;
 
 	async fn import_block(
 		&mut self,
-		mut block: BlockImportParams<Block, Self::Transaction>,
+		mut block: BlockImportParams<Block>,
 	) -> Result<ImportResult, Self::Error> {
 		let hash = block.post_hash();
 		let number = *block.header.number();
diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs
index 52a4550cda4f1a6c17aaa5447b138ee447eeffaa..da339dae7e1f584d68935728aba8fa77b4bd7fc2 100644
--- a/substrate/client/consensus/beefy/src/lib.rs
+++ b/substrate/client/consensus/beefy/src/lib.rs
@@ -139,9 +139,7 @@ pub fn beefy_block_import_and_links<B, BE, RuntimeApi, I>(
 where
 	B: Block,
 	BE: Backend<B>,
-	I: BlockImport<B, Error = ConsensusError, Transaction = sp_api::TransactionFor<RuntimeApi, B>>
-		+ Send
-		+ Sync,
+	I: BlockImport<B, Error = ConsensusError> + Send + Sync,
 	RuntimeApi: ProvideRuntimeApi<B> + Send + Sync,
 	RuntimeApi::Api: BeefyApi<B, AuthorityId>,
 {
diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs
index 1109a2263822183f78a57e3df76bd531206f67f2..3bb65e9d57f435b78b9faccffb687bc4ab64757d 100644
--- a/substrate/client/consensus/beefy/src/tests.rs
+++ b/substrate/client/consensus/beefy/src/tests.rs
@@ -83,7 +83,7 @@ type BeefyBlockImport = crate::BeefyBlockImport<
 	Block,
 	substrate_test_runtime_client::Backend,
 	TestApi,
-	BlockImportAdapter<PeersClient, sp_api::TransactionFor<TestApi, Block>>,
+	BlockImportAdapter<PeersClient>,
 >;
 
 pub(crate) type BeefyValidatorSet = ValidatorSet<AuthorityId>;
diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs
index b84a45d533d46405d0673af0b4da1226bbc99f5f..a451692ad478e41fb48a055cd2a5a5ae3c12c510 100644
--- a/substrate/client/consensus/common/src/block_import.rs
+++ b/substrate/client/consensus/common/src/block_import.rs
@@ -119,9 +119,9 @@ pub struct BlockCheckParams<Block: BlockT> {
 }
 
 /// Precomputed storage.
-pub enum StorageChanges<Block: BlockT, Transaction> {
+pub enum StorageChanges<Block: BlockT> {
 	/// Changes coming from block execution.
-	Changes(sp_state_machine::StorageChanges<Transaction, HashingFor<Block>>),
+	Changes(sp_state_machine::StorageChanges<HashingFor<Block>>),
 	/// Whole new state.
 	Import(ImportedState<Block>),
 }
@@ -142,9 +142,9 @@ impl<B: BlockT> std::fmt::Debug for ImportedState<B> {
 }
 
 /// Defines how a new state is computed for a given imported block.
-pub enum StateAction<Block: BlockT, Transaction> {
+pub enum StateAction<Block: BlockT> {
 	/// Apply precomputed changes coming from block execution or state sync.
-	ApplyChanges(StorageChanges<Block, Transaction>),
+	ApplyChanges(StorageChanges<Block>),
 	/// Execute block body (required) and compute state.
 	Execute,
 	/// Execute block body if parent state is available and compute state.
@@ -153,7 +153,7 @@ pub enum StateAction<Block: BlockT, Transaction> {
 	Skip,
 }
 
-impl<Block: BlockT, Transaction> StateAction<Block, Transaction> {
+impl<Block: BlockT> StateAction<Block> {
 	/// Check if execution checks that require runtime calls should be skipped.
 	pub fn skip_execution_checks(&self) -> bool {
 		match self {
@@ -167,7 +167,7 @@ impl<Block: BlockT, Transaction> StateAction<Block, Transaction> {
 
 /// Data required to import a Block.
 #[non_exhaustive]
-pub struct BlockImportParams<Block: BlockT, Transaction> {
+pub struct BlockImportParams<Block: BlockT> {
 	/// Origin of the Block
 	pub origin: BlockOrigin,
 	/// The header, without consensus post-digests applied. This should be in the same
@@ -192,7 +192,7 @@ pub struct BlockImportParams<Block: BlockT, Transaction> {
 	/// Indexed transaction body of the block.
 	pub indexed_body: Option<Vec<Vec<u8>>>,
 	/// Specify how the new state is computed.
-	pub state_action: StateAction<Block, Transaction>,
+	pub state_action: StateAction<Block>,
 	/// Is this block finalized already?
 	/// `true` implies instant finality.
 	pub finalized: bool,
@@ -218,7 +218,7 @@ pub struct BlockImportParams<Block: BlockT, Transaction> {
 	pub post_hash: Option<Block::Hash>,
 }
 
-impl<Block: BlockT, Transaction> BlockImportParams<Block, Transaction> {
+impl<Block: BlockT> BlockImportParams<Block> {
 	/// Create a new block import params.
 	pub fn new(origin: BlockOrigin, header: Block::Header) -> Self {
 		Self {
@@ -261,39 +261,6 @@ impl<Block: BlockT, Transaction> BlockImportParams<Block, Transaction> {
 		}
 	}
 
-	/// Auxiliary function for "converting" the transaction type.
-	///
-	/// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc think that
-	/// `Self` now uses a different transaction type.
-	pub fn clear_storage_changes_and_mutate<Transaction2>(
-		self,
-	) -> BlockImportParams<Block, Transaction2> {
-		// Preserve imported state.
-		let state_action = match self.state_action {
-			StateAction::ApplyChanges(StorageChanges::Import(state)) =>
-				StateAction::ApplyChanges(StorageChanges::Import(state)),
-			StateAction::ApplyChanges(StorageChanges::Changes(_)) => StateAction::Skip,
-			StateAction::Execute => StateAction::Execute,
-			StateAction::ExecuteIfPossible => StateAction::ExecuteIfPossible,
-			StateAction::Skip => StateAction::Skip,
-		};
-		BlockImportParams {
-			origin: self.origin,
-			header: self.header,
-			justifications: self.justifications,
-			post_digests: self.post_digests,
-			body: self.body,
-			indexed_body: self.indexed_body,
-			state_action,
-			finalized: self.finalized,
-			auxiliary: self.auxiliary,
-			intermediates: self.intermediates,
-			fork_choice: self.fork_choice,
-			import_existing: self.import_existing,
-			post_hash: self.post_hash,
-		}
-	}
-
 	/// Insert intermediate by given key.
 	pub fn insert_intermediate<T: 'static + Send>(&mut self, key: &'static [u8], value: T) {
 		self.intermediates.insert(Cow::from(key), Box::new(value));
@@ -338,8 +305,6 @@ impl<Block: BlockT, Transaction> BlockImportParams<Block, Transaction> {
 pub trait BlockImport<B: BlockT> {
 	/// The error type.
 	type Error: std::error::Error + Send + 'static;
-	/// The transaction type used by the backend.
-	type Transaction: Send + 'static;
 
 	/// Check block preconditions.
 	async fn check_block(
@@ -350,17 +315,13 @@ pub trait BlockImport<B: BlockT> {
 	/// Import a block.
 	async fn import_block(
 		&mut self,
-		block: BlockImportParams<B, Self::Transaction>,
+		block: BlockImportParams<B>,
 	) -> Result<ImportResult, Self::Error>;
 }
 
 #[async_trait::async_trait]
-impl<B: BlockT, Transaction> BlockImport<B> for crate::import_queue::BoxBlockImport<B, Transaction>
-where
-	Transaction: Send + 'static,
-{
+impl<B: BlockT> BlockImport<B> for crate::import_queue::BoxBlockImport<B> {
 	type Error = sp_consensus::error::Error;
-	type Transaction = Transaction;
 
 	/// Check block preconditions.
 	async fn check_block(
@@ -373,21 +334,19 @@ where
 	/// Import a block.
 	async fn import_block(
 		&mut self,
-		block: BlockImportParams<B, Transaction>,
+		block: BlockImportParams<B>,
 	) -> Result<ImportResult, Self::Error> {
 		(**self).import_block(block).await
 	}
 }
 
 #[async_trait::async_trait]
-impl<B: BlockT, T, E: std::error::Error + Send + 'static, Transaction> BlockImport<B> for Arc<T>
+impl<B: BlockT, T, E: std::error::Error + Send + 'static> BlockImport<B> for Arc<T>
 where
-	for<'r> &'r T: BlockImport<B, Error = E, Transaction = Transaction>,
+	for<'r> &'r T: BlockImport<B, Error = E>,
 	T: Send + Sync,
-	Transaction: Send + 'static,
 {
 	type Error = E;
-	type Transaction = Transaction;
 
 	async fn check_block(
 		&mut self,
@@ -398,7 +357,7 @@ where
 
 	async fn import_block(
 		&mut self,
-		block: BlockImportParams<B, Transaction>,
+		block: BlockImportParams<B>,
 	) -> Result<ImportResult, Self::Error> {
 		(&**self).import_block(block).await
 	}
diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs
index bffc06df07076f42ef6554cc3f718b8521db048d..39d5bf8ed35d17468a8d8819a423f22a0d80667c 100644
--- a/substrate/client/consensus/common/src/import_queue.rs
+++ b/substrate/client/consensus/common/src/import_queue.rs
@@ -50,16 +50,14 @@ const LOG_TARGET: &str = "sync::import-queue";
 /// A commonly-used Import Queue type.
 ///
-/// This defines the transaction type of the `BasicQueue` to be the transaction type for a client.
+/// This defines the default block import queue used by clients, a [`BasicQueue`].
-pub type DefaultImportQueue<Block, Client> =
-	BasicQueue<Block, sp_api::TransactionFor<Client, Block>>;
+pub type DefaultImportQueue<Block> = BasicQueue<Block>;
 
 mod basic_queue;
 pub mod buffered_link;
 pub mod mock;
 
 /// Shared block import struct used by the queue.
-pub type BoxBlockImport<B, Transaction> =
-	Box<dyn BlockImport<B, Error = ConsensusError, Transaction = Transaction> + Send + Sync>;
+pub type BoxBlockImport<B> = Box<dyn BlockImport<B, Error = ConsensusError> + Send + Sync>;
 
 /// Shared justification import struct used by the queue.
 pub type BoxJustificationImport<B> =
@@ -98,10 +96,8 @@ pub struct IncomingBlock<B: BlockT> {
 pub trait Verifier<B: BlockT>: Send {
 	/// Verify the given block data and return the `BlockImportParams` to
 	/// continue the block import process.
-	async fn verify(
-		&mut self,
-		block: BlockImportParams<B, ()>,
-	) -> Result<BlockImportParams<B, ()>, String>;
+	async fn verify(&mut self, block: BlockImportParams<B>)
+		-> Result<BlockImportParams<B>, String>;
 }
 
 /// Blocks import queue API.
@@ -221,8 +217,8 @@ pub enum BlockImportError {
 type BlockImportResult<B> = Result<BlockImportStatus<NumberFor<B>>, BlockImportError>;
 
 /// Single block import function.
-pub async fn import_single_block<B: BlockT, V: Verifier<B>, Transaction: Send + 'static>(
-	import_handle: &mut impl BlockImport<B, Transaction = Transaction, Error = ConsensusError>,
+pub async fn import_single_block<B: BlockT, V: Verifier<B>>(
+	import_handle: &mut impl BlockImport<B, Error = ConsensusError>,
 	block_origin: BlockOrigin,
 	block: IncomingBlock<B>,
 	verifier: &mut V,
@@ -231,12 +227,8 @@ pub async fn import_single_block<B: BlockT, V: Verifier<B>, Transaction: Send +
 }
 
 /// Single block import function with metering.
-pub(crate) async fn import_single_block_metered<
-	B: BlockT,
-	V: Verifier<B>,
-	Transaction: Send + 'static,
->(
-	import_handle: &mut impl BlockImport<B, Transaction = Transaction, Error = ConsensusError>,
+pub(crate) async fn import_single_block_metered<B: BlockT, V: Verifier<B>>(
+	import_handle: &mut impl BlockImport<B, Error = ConsensusError>,
 	block_origin: BlockOrigin,
 	block: IncomingBlock<B>,
 	verifier: &mut V,
@@ -350,7 +342,6 @@ pub(crate) async fn import_single_block_metered<
 		metrics.report_verification(true, started.elapsed());
 	}
 
-	let import_block = import_block.clear_storage_changes_and_mutate();
 	let imported = import_handle.import_block(import_block).await;
 	if let Some(metrics) = metrics.as_ref() {
 		metrics.report_verification_and_import(started.elapsed());
diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs
index b93913703d39faabd0b00289fc39274ab0e0bf48..1cc7ec26fd1930100ebe45f9c1e8ba643bd5be37 100644
--- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs
+++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs
@@ -28,7 +28,7 @@ use sp_runtime::{
 	traits::{Block as BlockT, Header as HeaderT, NumberFor},
 	Justification, Justifications,
 };
-use std::{marker::PhantomData, pin::Pin, time::Duration};
+use std::{pin::Pin, time::Duration};
 
 use crate::{
 	import_queue::{
@@ -42,15 +42,14 @@ use crate::{
 
 /// Interface to a basic block import queue that is importing blocks sequentially in a separate
-/// task, with plugable verification.
+/// task, with pluggable verification.
-pub struct BasicQueue<B: BlockT, Transaction> {
+pub struct BasicQueue<B: BlockT> {
 	/// Handle for sending justification and block import messages to the background task.
 	handle: BasicQueueHandle<B>,
 	/// Results coming from the worker task.
 	result_port: BufferedLinkReceiver<B>,
-	_phantom: PhantomData<Transaction>,
 }
 
-impl<B: BlockT, Transaction> Drop for BasicQueue<B, Transaction> {
+impl<B: BlockT> Drop for BasicQueue<B> {
 	fn drop(&mut self) {
 		// Flush the queue and close the receiver to terminate the future.
 		self.handle.close();
@@ -58,13 +57,13 @@ impl<B: BlockT, Transaction> Drop for BasicQueue<B, Transaction> {
 	}
 }
 
-impl<B: BlockT, Transaction: Send + 'static> BasicQueue<B, Transaction> {
+impl<B: BlockT> BasicQueue<B> {
 	/// Instantiate a new basic queue, with given verifier.
 	///
 	/// This creates a background task, and calls `on_start` on the justification importer.
 	pub fn new<V: 'static + Verifier<B>>(
 		verifier: V,
-		block_import: BoxBlockImport<B, Transaction>,
+		block_import: BoxBlockImport<B>,
 		justification_import: Option<BoxJustificationImport<B>>,
 		spawner: &impl sp_core::traits::SpawnEssentialNamed,
 		prometheus_registry: Option<&Registry>,
@@ -96,7 +95,6 @@ impl<B: BlockT, Transaction: Send + 'static> BasicQueue<B, Transaction> {
 		Self {
 			handle: BasicQueueHandle::new(justification_sender, block_import_sender),
 			result_port,
-			_phantom: PhantomData,
 		}
 	}
 }
@@ -165,7 +163,7 @@ impl<B: BlockT> ImportQueueService<B> for BasicQueueHandle<B> {
 }
 
 #[async_trait::async_trait]
-impl<B: BlockT, Transaction: Send> ImportQueue<B> for BasicQueue<B, Transaction> {
+impl<B: BlockT> ImportQueue<B> for BasicQueue<B> {
 	/// Get handle to [`ImportQueueService`].
 	fn service(&self) -> Box<dyn ImportQueueService<B>> {
 		Box::new(self.handle.clone())
@@ -220,8 +218,8 @@ mod worker_messages {
 /// to give other futures the possibility to be run.
 ///
 /// Returns when `block_import` ended.
-async fn block_import_process<B: BlockT, Transaction: Send + 'static>(
-	mut block_import: BoxBlockImport<B, Transaction>,
+async fn block_import_process<B: BlockT>(
+	mut block_import: BoxBlockImport<B>,
 	mut verifier: impl Verifier<B>,
 	mut result_sender: BufferedLinkSender<B>,
 	mut block_import_receiver: TracingUnboundedReceiver<worker_messages::ImportBlocks<B>>,
@@ -262,10 +260,10 @@ struct BlockImportWorker<B: BlockT> {
 }
 
 impl<B: BlockT> BlockImportWorker<B> {
-	fn new<V: 'static + Verifier<B>, Transaction: Send + 'static>(
+	fn new<V: 'static + Verifier<B>>(
 		result_sender: BufferedLinkSender<B>,
 		verifier: V,
-		block_import: BoxBlockImport<B, Transaction>,
+		block_import: BoxBlockImport<B>,
 		justification_import: Option<BoxJustificationImport<B>>,
 		metrics: Option<Metrics>,
 	) -> (
@@ -391,8 +389,8 @@ struct ImportManyBlocksResult<B: BlockT> {
 ///
 /// This will yield after each imported block once, to ensure that other futures can
 /// be called as well.
-async fn import_many_blocks<B: BlockT, V: Verifier<B>, Transaction: Send + 'static>(
-	import_handle: &mut BoxBlockImport<B, Transaction>,
+async fn import_many_blocks<B: BlockT, V: Verifier<B>>(
+	import_handle: &mut BoxBlockImport<B>,
 	blocks_origin: BlockOrigin,
 	blocks: Vec<IncomingBlock<B>>,
 	verifier: &mut V,
@@ -507,14 +505,14 @@ mod tests {
 		import_queue::Verifier,
 	};
 	use futures::{executor::block_on, Future};
-	use sp_test_primitives::{Block, BlockNumber, Extrinsic, Hash, Header};
+	use sp_test_primitives::{Block, BlockNumber, Hash, Header};
 
 	#[async_trait::async_trait]
 	impl Verifier<Block> for () {
 		async fn verify(
 			&mut self,
-			block: BlockImportParams<Block, ()>,
-		) -> Result<BlockImportParams<Block, ()>, String> {
+			block: BlockImportParams<Block>,
+		) -> Result<BlockImportParams<Block>, String> {
 			Ok(BlockImportParams::new(block.origin, block.header))
 		}
 	}
@@ -522,7 +520,6 @@ mod tests {
 	#[async_trait::async_trait]
 	impl BlockImport<Block> for () {
 		type Error = sp_consensus::Error;
-		type Transaction = Extrinsic;
 
 		async fn check_block(
 			&mut self,
@@ -533,7 +530,7 @@ mod tests {
 
 		async fn import_block(
 			&mut self,
-			_block: BlockImportParams<Block, Self::Transaction>,
+			_block: BlockImportParams<Block>,
 		) -> Result<ImportResult, Self::Error> {
 			Ok(ImportResult::imported(true))
 		}
diff --git a/substrate/client/consensus/grandpa/src/import.rs b/substrate/client/consensus/grandpa/src/import.rs
index 760cb2da0484d99793c008c14c110f39d9610ee8..8481b395847292173b4fa89206d9610d5aa767e0 100644
--- a/substrate/client/consensus/grandpa/src/import.rs
+++ b/substrate/client/consensus/grandpa/src/import.rs
@@ -28,7 +28,7 @@ use sc_consensus::{
 };
 use sc_telemetry::TelemetryHandle;
 use sc_utils::mpsc::TracingUnboundedSender;
-use sp_api::{Core, RuntimeApiInfo, TransactionFor};
+use sp_api::{Core, RuntimeApiInfo};
 use sp_blockchain::BlockStatus;
 use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain};
 use sp_consensus_grandpa::{ConsensusLog, GrandpaApi, ScheduledChange, SetId, GRANDPA_ENGINE_ID};
@@ -234,9 +234,7 @@ where
 	BE: Backend<Block>,
 	Client: ClientForGrandpa<Block, BE>,
 	Client::Api: GrandpaApi<Block>,
-	for<'a> &'a Client:
-		BlockImport<Block, Error = ConsensusError, Transaction = TransactionFor<Client, Block>>,
-	TransactionFor<Client, Block>: 'static,
+	for<'a> &'a Client: BlockImport<Block, Error = ConsensusError>,
 {
 	// check for a new authority set change.
 	fn check_new_change(
@@ -273,7 +271,7 @@ where
 
 	fn make_authorities_changes(
 		&self,
-		block: &mut BlockImportParams<Block, TransactionFor<Client, Block>>,
+		block: &mut BlockImportParams<Block>,
 		hash: Block::Hash,
 		initial_sync: bool,
 	) -> Result<PendingSetChanges<Block>, ConsensusError> {
@@ -461,7 +459,7 @@ where
 	/// Import whole new state and reset authority set.
 	async fn import_state(
 		&mut self,
-		mut block: BlockImportParams<Block, TransactionFor<Client, Block>>,
+		mut block: BlockImportParams<Block>,
 	) -> Result<ImportResult, ConsensusError> {
 		let hash = block.post_hash();
 		let number = *block.header.number();
@@ -516,17 +514,14 @@ where
 	BE: Backend<Block>,
 	Client: ClientForGrandpa<Block, BE>,
 	Client::Api: GrandpaApi<Block>,
-	for<'a> &'a Client:
-		BlockImport<Block, Error = ConsensusError, Transaction = TransactionFor<Client, Block>>,
-	TransactionFor<Client, Block>: 'static,
+	for<'a> &'a Client: BlockImport<Block, Error = ConsensusError>,
 	SC: Send,
 {
 	type Error = ConsensusError;
-	type Transaction = TransactionFor<Client, Block>;
 
 	async fn import_block(
 		&mut self,
-		mut block: BlockImportParams<Block, Self::Transaction>,
+		mut block: BlockImportParams<Block>,
 	) -> Result<ImportResult, Self::Error> {
 		let hash = block.post_hash();
 		let number = *block.header.number();
diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs
index ff0412aeb314c70b58095aa90df62d3899b4cbd4..da621abd254caf90abf5dfab0511d5e868218168 100644
--- a/substrate/client/consensus/grandpa/src/lib.rs
+++ b/substrate/client/consensus/grandpa/src/lib.rs
@@ -65,7 +65,6 @@ use sc_client_api::{
 	backend::{AuxStore, Backend},
 	utils::is_descendent_of,
 	BlockchainEvents, CallExecutor, ExecutorProvider, Finalizer, LockImportRun, StorageProvider,
-	TransactionFor,
 };
 use sc_consensus::BlockImport;
 use sc_network::types::ProtocolName;
@@ -309,7 +308,7 @@ pub trait ClientForGrandpa<Block, BE>:
 	+ BlockchainEvents<Block>
 	+ ProvideRuntimeApi<Block>
 	+ ExecutorProvider<Block>
-	+ BlockImport<Block, Transaction = TransactionFor<BE, Block>, Error = sp_consensus::Error>
+	+ BlockImport<Block, Error = sp_consensus::Error>
 	+ StorageProvider<Block, BE>
 where
 	BE: Backend<Block>,
@@ -329,7 +328,7 @@ where
 		+ BlockchainEvents<Block>
 		+ ProvideRuntimeApi<Block>
 		+ ExecutorProvider<Block>
-		+ BlockImport<Block, Transaction = TransactionFor<BE, Block>, Error = sp_consensus::Error>
+		+ BlockImport<Block, Error = sp_consensus::Error>
 		+ StorageProvider<Block, BE>,
 {
 }
diff --git a/substrate/client/consensus/manual-seal/src/consensus.rs b/substrate/client/consensus/manual-seal/src/consensus.rs
index b54ec5e41b750a8daafa7511d079959a3915a479..2cc2b902b1ce9a57c19ea4fd34a26b62412deb99 100644
--- a/substrate/client/consensus/manual-seal/src/consensus.rs
+++ b/substrate/client/consensus/manual-seal/src/consensus.rs
@@ -30,9 +30,6 @@ pub mod timestamp;
 /// Consensus data provider, manual seal uses this trait object for authoring blocks valid
 /// for any runtime.
 pub trait ConsensusDataProvider<B: BlockT>: Send + Sync {
-	/// Block import transaction type
-	type Transaction;
-
 	/// The proof type.
 	type Proof;
 
@@ -43,7 +40,7 @@ pub trait ConsensusDataProvider<B: BlockT>: Send + Sync {
 	fn append_block_import(
 		&self,
 		parent: &B::Header,
-		params: &mut BlockImportParams<B, Self::Transaction>,
+		params: &mut BlockImportParams<B>,
 		inherents: &InherentData,
 		proof: Self::Proof,
 	) -> Result<(), Error>;
diff --git a/substrate/client/consensus/manual-seal/src/consensus/aura.rs b/substrate/client/consensus/manual-seal/src/consensus/aura.rs
index 92203f91826f0751d44b924020dee5bf01ccb556..566a2266c701b4cdffaa98caef14d8e88ac60952 100644
--- a/substrate/client/consensus/manual-seal/src/consensus/aura.rs
+++ b/substrate/client/consensus/manual-seal/src/consensus/aura.rs
@@ -22,7 +22,7 @@
 use crate::{ConsensusDataProvider, Error};
 use sc_client_api::{AuxStore, UsageProvider};
 use sc_consensus::BlockImportParams;
-use sp_api::{ProvideRuntimeApi, TransactionFor};
+use sp_api::ProvideRuntimeApi;
 use sp_blockchain::{HeaderBackend, HeaderMetadata};
 use sp_consensus_aura::{
 	digests::CompatibleDigestItem,
@@ -69,7 +69,6 @@ where
 	C::Api: AuraApi<B, AuthorityId>,
 	P: Send + Sync,
 {
-	type Transaction = TransactionFor<C, B>;
 	type Proof = P;
 
 	fn create_digest(
@@ -92,7 +91,7 @@ where
 	fn append_block_import(
 		&self,
 		_parent: &B::Header,
-		_params: &mut BlockImportParams<B, Self::Transaction>,
+		_params: &mut BlockImportParams<B>,
 		_inherents: &InherentData,
 		_proof: Self::Proof,
 	) -> Result<(), Error> {
diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs
index 2485bd603e785444cb5253a5adef939720ac45cb..26fa81459808c39635a1970b0fef7b36774803a9 100644
--- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs
+++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs
@@ -33,7 +33,7 @@ use sp_keystore::KeystorePtr;
 use std::{marker::PhantomData, sync::Arc};
 
 use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier};
-use sp_api::{ProvideRuntimeApi, TransactionFor};
+use sp_api::ProvideRuntimeApi;
 use sp_blockchain::{HeaderBackend, HeaderMetadata};
 use sp_consensus_babe::{
 	digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest},
@@ -97,8 +97,8 @@ where
 {
 	async fn verify(
 		&mut self,
-		mut import_params: BlockImportParams<B, ()>,
-	) -> Result<BlockImportParams<B, ()>, String> {
+		mut import_params: BlockImportParams<B>,
+	) -> Result<BlockImportParams<B>, String> {
 		import_params.finalized = false;
 		import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
 
@@ -197,7 +197,6 @@ where
 	C::Api: BabeApi<B>,
 	P: Send + Sync,
 {
-	type Transaction = TransactionFor<C, B>;
 	type Proof = P;
 
 	fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result<Digest, Error> {
@@ -264,7 +263,7 @@ where
 	fn append_block_import(
 		&self,
 		parent: &B::Header,
-		params: &mut BlockImportParams<B, Self::Transaction>,
+		params: &mut BlockImportParams<B>,
 		inherents: &InherentData,
 		_proof: Self::Proof,
 	) -> Result<(), Error> {
diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs
index 03c9418b5c560eea1216d3f3adfda49acb2d7fef..c3b891b84e8934f57b11c604afd13ac8c8a8f882 100644
--- a/substrate/client/consensus/manual-seal/src/lib.rs
+++ b/substrate/client/consensus/manual-seal/src/lib.rs
@@ -52,7 +52,7 @@ pub use self::{
 	seal_block::{seal_block, SealBlockParams, MAX_PROPOSAL_DURATION},
 };
 use sc_transaction_pool_api::TransactionPool;
-use sp_api::{ProvideRuntimeApi, TransactionFor};
+use sp_api::ProvideRuntimeApi;
 
 const LOG_TARGET: &str = "manual-seal";
 
@@ -66,8 +66,8 @@ struct ManualSealVerifier;
 impl<B: BlockT> Verifier<B> for ManualSealVerifier {
 	async fn verify(
 		&mut self,
-		mut block: BlockImportParams<B, ()>,
-	) -> Result<BlockImportParams<B, ()>, String> {
+		mut block: BlockImportParams<B>,
+	) -> Result<BlockImportParams<B>, String> {
 		block.finalized = false;
 		block.fork_choice = Some(ForkChoiceStrategy::LongestChain);
 		Ok(block)
@@ -75,14 +75,13 @@ impl<B: BlockT> Verifier<B> for ManualSealVerifier {
 }
 
 /// Instantiate the import queue for the manual seal consensus engine.
-pub fn import_queue<Block, Transaction>(
-	block_import: BoxBlockImport<Block, Transaction>,
+pub fn import_queue<Block>(
+	block_import: BoxBlockImport<Block>,
 	spawner: &impl sp_core::traits::SpawnEssentialNamed,
 	registry: Option<&Registry>,
-) -> BasicQueue<Block, Transaction>
+) -> BasicQueue<Block>
 where
 	Block: BlockT,
-	Transaction: Send + Sync + 'static,
 {
 	BasicQueue::new(ManualSealVerifier, block_import, None, spawner, registry)
 }
@@ -109,8 +108,7 @@ pub struct ManualSealParams<B: BlockT, BI, E, C: ProvideRuntimeApi<B>, TP, SC, C
 	pub select_chain: SC,
 
 	/// Digest provider for inclusion in blocks.
-	pub consensus_data_provider:
-		Option<Box<dyn ConsensusDataProvider<B, Proof = P, Transaction = TransactionFor<C, B>>>>,
+	pub consensus_data_provider: Option<Box<dyn ConsensusDataProvider<B, Proof = P>>>,
 
 	/// Something that can create the inherent data providers.
 	pub create_inherent_data_providers: CIDP,
@@ -134,8 +132,7 @@ pub struct InstantSealParams<B: BlockT, BI, E, C: ProvideRuntimeApi<B>, TP, SC,
 	pub select_chain: SC,
 
 	/// Digest provider for inclusion in blocks.
-	pub consensus_data_provider:
-		Option<Box<dyn ConsensusDataProvider<B, Proof = P, Transaction = TransactionFor<C, B>>>>,
+	pub consensus_data_provider: Option<Box<dyn ConsensusDataProvider<B, Proof = P>>>,
 
 	/// Something that can create the inherent data providers.
 	pub create_inherent_data_providers: CIDP,
@@ -167,17 +164,13 @@ pub async fn run_manual_seal<B, BI, CB, E, C, TP, SC, CS, CIDP, P>(
 	}: ManualSealParams<B, BI, E, C, TP, SC, CS, CIDP, P>,
 ) where
 	B: BlockT + 'static,
-	BI: BlockImport<B, Error = sp_consensus::Error, Transaction = sp_api::TransactionFor<C, B>>
-		+ Send
-		+ Sync
-		+ 'static,
+	BI: BlockImport<B, Error = sp_consensus::Error> + Send + Sync + 'static,
 	C: HeaderBackend<B> + Finalizer<B, CB> + ProvideRuntimeApi<B> + 'static,
 	CB: ClientBackend<B> + 'static,
 	E: Environment<B> + 'static,
-	E::Proposer: Proposer<B, Proof = P, Transaction = TransactionFor<C, B>>,
+	E::Proposer: Proposer<B, Proof = P>,
 	CS: Stream<Item = EngineCommand<<B as BlockT>::Hash>> + Unpin + 'static,
 	SC: SelectChain<B> + 'static,
-	TransactionFor<C, B>: 'static,
 	TP: TransactionPool<Block = B>,
 	CIDP: CreateInherentDataProviders<B, ()>,
 	P: Send + Sync + 'static,
@@ -230,16 +223,12 @@ pub async fn run_instant_seal<B, BI, CB, E, C, TP, SC, CIDP, P>(
 	}: InstantSealParams<B, BI, E, C, TP, SC, CIDP, P>,
 ) where
 	B: BlockT + 'static,
-	BI: BlockImport<B, Error = sp_consensus::Error, Transaction = sp_api::TransactionFor<C, B>>
-		+ Send
-		+ Sync
-		+ 'static,
+	BI: BlockImport<B, Error = sp_consensus::Error> + Send + Sync + 'static,
 	C: HeaderBackend<B> + Finalizer<B, CB> + ProvideRuntimeApi<B> + 'static,
 	CB: ClientBackend<B> + 'static,
 	E: Environment<B> + 'static,
-	E::Proposer: Proposer<B, Proof = P, Transaction = TransactionFor<C, B>>,
+	E::Proposer: Proposer<B, Proof = P>,
 	SC: SelectChain<B> + 'static,
-	TransactionFor<C, B>: 'static,
 	TP: TransactionPool<Block = B>,
 	CIDP: CreateInherentDataProviders<B, ()>,
 	P: Send + Sync + 'static,
@@ -284,16 +273,12 @@ pub async fn run_instant_seal_and_finalize<B, BI, CB, E, C, TP, SC, CIDP, P>(
 	}: InstantSealParams<B, BI, E, C, TP, SC, CIDP, P>,
 ) where
 	B: BlockT + 'static,
-	BI: BlockImport<B, Error = sp_consensus::Error, Transaction = sp_api::TransactionFor<C, B>>
-		+ Send
-		+ Sync
-		+ 'static,
+	BI: BlockImport<B, Error = sp_consensus::Error> + Send + Sync + 'static,
 	C: HeaderBackend<B> + Finalizer<B, CB> + ProvideRuntimeApi<B> + 'static,
 	CB: ClientBackend<B> + 'static,
 	E: Environment<B> + 'static,
-	E::Proposer: Proposer<B, Proof = P, Transaction = TransactionFor<C, B>>,
+	E::Proposer: Proposer<B, Proof = P>,
 	SC: SelectChain<B> + 'static,
-	TransactionFor<C, B>: 'static,
 	TP: TransactionPool<Block = B>,
 	CIDP: CreateInherentDataProviders<B, ()>,
 	P: Send + Sync + 'static,
@@ -386,7 +371,6 @@ mod tests {
 		B: BlockT,
 		C: ProvideRuntimeApi<B> + Send + Sync,
 	{
-		type Transaction = TransactionFor<C, B>;
 		type Proof = ();
 
 		fn create_digest(
@@ -400,7 +384,7 @@ mod tests {
 		fn append_block_import(
 			&self,
 			_parent: &B::Header,
-			params: &mut BlockImportParams<B, Self::Transaction>,
+			params: &mut BlockImportParams<B>,
 			_inherents: &InherentData,
 			_proof: Self::Proof,
 		) -> Result<(), Error> {
diff --git a/substrate/client/consensus/manual-seal/src/seal_block.rs b/substrate/client/consensus/manual-seal/src/seal_block.rs
index e6133bccae885b1c34f1e917fc294eac98917b46..4b6230c3efc3d8475b3c7ee44d1dd413bc9a4a54 100644
--- a/substrate/client/consensus/manual-seal/src/seal_block.rs
+++ b/substrate/client/consensus/manual-seal/src/seal_block.rs
@@ -22,7 +22,7 @@ use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error};
 use futures::prelude::*;
 use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction};
 use sc_transaction_pool_api::TransactionPool;
-use sp_api::{ProvideRuntimeApi, TransactionFor};
+use sp_api::ProvideRuntimeApi;
 use sp_blockchain::HeaderBackend;
 use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain};
 use sp_inherents::{CreateInherentDataProviders, InherentDataProvider};
@@ -52,8 +52,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi<B>, E, TP
 	/// SelectChain object
 	pub select_chain: &'a SC,
 	/// Digest provider for inclusion in blocks.
-	pub consensus_data_provider:
-		Option<&'a dyn ConsensusDataProvider<B, Proof = P, Transaction = TransactionFor<C, B>>>,
+	pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider<B, Proof = P>>,
 	/// block import object
 	pub block_import: &'a mut BI,
 	/// Something that can create the inherent data providers.
@@ -77,16 +76,12 @@ pub async fn seal_block<B, BI, SC, C, E, TP, CIDP, P>(
 	}: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP, P>,
 ) where
 	B: BlockT,
-	BI: BlockImport<B, Error = sp_consensus::Error, Transaction = sp_api::TransactionFor<C, B>>
-		+ Send
-		+ Sync
-		+ 'static,
+	BI: BlockImport<B, Error = sp_consensus::Error> + Send + Sync + 'static,
 	C: HeaderBackend<B> + ProvideRuntimeApi<B>,
 	E: Environment<B>,
-	E::Proposer: Proposer<B, Proof = P, Transaction = TransactionFor<C, B>>,
+	E::Proposer: Proposer<B, Proof = P>,
 	TP: TransactionPool<Block = B>,
 	SC: SelectChain<B>,
-	TransactionFor<C, B>: 'static,
 	CIDP: CreateInherentDataProviders<B, ()>,
 	P: Send + Sync + 'static,
 {
diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs
index 763cf10e6cd4f14dedd9ab35b8a26f119518d390..ee5c1dfc6f11a26599c0f01efee9224caded43cd 100644
--- a/substrate/client/consensus/pow/src/lib.rs
+++ b/substrate/client/consensus/pow/src/lib.rs
@@ -237,7 +237,7 @@ impl<B: BlockT, I: Clone, C, S: Clone, Algorithm: Clone, CIDP> Clone
 impl<B, I, C, S, Algorithm, CIDP> PowBlockImport<B, I, C, S, Algorithm, CIDP>
 where
 	B: BlockT,
-	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync,
+	I: BlockImport<B> + Send + Sync,
 	I::Error: Into<ConsensusError>,
 	C: ProvideRuntimeApi<B> + Send + Sync + HeaderBackend<B> + AuxStore + BlockOf,
 	C::Api: BlockBuilderApi<B>,
@@ -301,7 +301,7 @@ where
 impl<B, I, C, S, Algorithm, CIDP> BlockImport<B> for PowBlockImport<B, I, C, S, Algorithm, CIDP>
 where
 	B: BlockT,
-	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync,
+	I: BlockImport<B> + Send + Sync,
 	I::Error: Into<ConsensusError>,
 	S: SelectChain<B>,
 	C: ProvideRuntimeApi<B> + Send + Sync + HeaderBackend<B> + AuxStore + BlockOf,
@@ -311,7 +311,6 @@ where
 	CIDP: CreateInherentDataProviders<B, ()> + Send + Sync,
 {
 	type Error = ConsensusError;
-	type Transaction = sp_api::TransactionFor<C, B>;
 
 	async fn check_block(
 		&mut self,
@@ -322,7 +321,7 @@ where
 
 	async fn import_block(
 		&mut self,
-		mut block: BlockImportParams<B, Self::Transaction>,
+		mut block: BlockImportParams<B>,
 	) -> Result<ImportResult, Self::Error> {
 		let best_header = self
 			.select_chain
@@ -444,8 +443,8 @@ where
 {
 	async fn verify(
 		&mut self,
-		mut block: BlockImportParams<B, ()>,
-	) -> Result<BlockImportParams<B, ()>, String> {
+		mut block: BlockImportParams<B>,
+	) -> Result<BlockImportParams<B>, String> {
 		let hash = block.header.hash();
 		let (checked_header, seal) = self.check_header(block.header)?;
 
@@ -460,19 +459,18 @@ where
 }
 
 /// The PoW import queue type.
-pub type PowImportQueue<B, Transaction> = BasicQueue<B, Transaction>;
+pub type PowImportQueue<B> = BasicQueue<B>;
 
 /// Import queue for PoW engine.
-pub fn import_queue<B, Transaction, Algorithm>(
-	block_import: BoxBlockImport<B, Transaction>,
+pub fn import_queue<B, Algorithm>(
+	block_import: BoxBlockImport<B>,
 	justification_import: Option<BoxJustificationImport<B>>,
 	algorithm: Algorithm,
 	spawner: &impl sp_core::traits::SpawnEssentialNamed,
 	registry: Option<&Registry>,
-) -> Result<PowImportQueue<B, Transaction>, sp_consensus::Error>
+) -> Result<PowImportQueue<B>, sp_consensus::Error>
 where
 	B: BlockT,
-	Transaction: Send + Sync + 'static,
 	Algorithm: PowAlgorithm<B> + Clone + Send + Sync + 'static,
 	Algorithm::Difficulty: Send,
 {
@@ -491,7 +489,7 @@ where
 /// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted
 /// for blocks being built. This can encode authorship information, or just be a graffiti.
 pub fn start_mining_worker<Block, C, S, Algorithm, E, SO, L, CIDP>(
-	block_import: BoxBlockImport<Block, sp_api::TransactionFor<C, Block>>,
+	block_import: BoxBlockImport<Block>,
 	client: Arc<C>,
 	select_chain: S,
 	algorithm: Algorithm,
@@ -503,18 +501,18 @@ pub fn start_mining_worker<Block, C, S, Algorithm, E, SO, L, CIDP>(
 	timeout: Duration,
 	build_time: Duration,
 ) -> (
-	MiningHandle<Block, Algorithm, C, L, <E::Proposer as Proposer<Block>>::Proof>,
+	MiningHandle<Block, Algorithm, L, <E::Proposer as Proposer<Block>>::Proof>,
 	impl Future<Output = ()>,
 )
 where
 	Block: BlockT,
-	C: ProvideRuntimeApi<Block> + BlockchainEvents<Block> + 'static,
+	C: BlockchainEvents<Block> + 'static,
 	S: SelectChain<Block> + 'static,
 	Algorithm: PowAlgorithm<Block> + Clone,
 	Algorithm::Difficulty: Send + 'static,
 	E: Environment<Block> + Send + Sync + 'static,
 	E::Error: std::fmt::Debug,
-	E::Proposer: Proposer<Block, Transaction = sp_api::TransactionFor<C, Block>>,
+	E::Proposer: Proposer<Block>,
 	SO: SyncOracle + Clone + Send + Sync + 'static,
 	L: sc_consensus::JustificationSyncLink<Block>,
 	CIDP: CreateInherentDataProviders<Block, ()>,
@@ -632,7 +630,7 @@ where
 					},
 				};
 
-			let build = MiningBuild::<Block, Algorithm, C, _> {
+			let build = MiningBuild::<Block, Algorithm, _> {
 				metadata: MiningMetadata {
 					best_hash,
 					pre_hash: proposal.block.header().hash(),
diff --git a/substrate/client/consensus/pow/src/worker.rs b/substrate/client/consensus/pow/src/worker.rs
index 3cb5dfcc09260a6d9dec050f7febdd61954a7c5d..9e9c4fc137d86dd3945d5cbc3aa44b43558a757d 100644
--- a/substrate/client/consensus/pow/src/worker.rs
+++ b/substrate/client/consensus/pow/src/worker.rs
@@ -56,16 +56,11 @@ pub struct MiningMetadata<H, D> {
 }
 
 /// A build of mining, containing the metadata and the block proposal.
-pub struct MiningBuild<
-	Block: BlockT,
-	Algorithm: PowAlgorithm<Block>,
-	C: sp_api::ProvideRuntimeApi<Block>,
-	Proof,
-> {
+pub struct MiningBuild<Block: BlockT, Algorithm: PowAlgorithm<Block>, Proof> {
 	/// Mining metadata.
 	pub metadata: MiningMetadata<Block::Hash, Algorithm::Difficulty>,
 	/// Mining proposal.
-	pub proposal: Proposal<Block, sp_api::TransactionFor<C, Block>, Proof>,
+	pub proposal: Proposal<Block, Proof>,
 }
 
 /// Version of the mining worker.
@@ -76,25 +71,22 @@ pub struct Version(usize);
 pub struct MiningHandle<
 	Block: BlockT,
 	Algorithm: PowAlgorithm<Block>,
-	C: sp_api::ProvideRuntimeApi<Block>,
 	L: sc_consensus::JustificationSyncLink<Block>,
 	Proof,
 > {
 	version: Arc<AtomicUsize>,
 	algorithm: Arc<Algorithm>,
 	justification_sync_link: Arc<L>,
-	build: Arc<Mutex<Option<MiningBuild<Block, Algorithm, C, Proof>>>>,
-	block_import: Arc<Mutex<BoxBlockImport<Block, sp_api::TransactionFor<C, Block>>>>,
+	build: Arc<Mutex<Option<MiningBuild<Block, Algorithm, Proof>>>>,
+	block_import: Arc<Mutex<BoxBlockImport<Block>>>,
 }
 
-impl<Block, Algorithm, C, L, Proof> MiningHandle<Block, Algorithm, C, L, Proof>
+impl<Block, Algorithm, L, Proof> MiningHandle<Block, Algorithm, L, Proof>
 where
 	Block: BlockT,
-	C: sp_api::ProvideRuntimeApi<Block>,
 	Algorithm: PowAlgorithm<Block>,
 	Algorithm::Difficulty: 'static + Send,
 	L: sc_consensus::JustificationSyncLink<Block>,
-	sp_api::TransactionFor<C, Block>: Send + 'static,
 {
 	fn increment_version(&self) {
 		self.version.fetch_add(1, Ordering::SeqCst);
@@ -102,7 +94,7 @@ where
 
 	pub(crate) fn new(
 		algorithm: Algorithm,
-		block_import: BoxBlockImport<Block, sp_api::TransactionFor<C, Block>>,
+		block_import: BoxBlockImport<Block>,
 		justification_sync_link: L,
 	) -> Self {
 		Self {
@@ -120,7 +112,7 @@ where
 		self.increment_version();
 	}
 
-	pub(crate) fn on_build(&self, value: MiningBuild<Block, Algorithm, C, Proof>) {
+	pub(crate) fn on_build(&self, value: MiningBuild<Block, Algorithm, Proof>) {
 		let mut build = self.build.lock();
 		*build = Some(value);
 		self.increment_version();
@@ -224,11 +216,10 @@ where
 	}
 }
 
-impl<Block, Algorithm, C, L, Proof> Clone for MiningHandle<Block, Algorithm, C, L, Proof>
+impl<Block, Algorithm, L, Proof> Clone for MiningHandle<Block, Algorithm, L, Proof>
 where
 	Block: BlockT,
 	Algorithm: PowAlgorithm<Block>,
-	C: sp_api::ProvideRuntimeApi<Block>,
 	L: sc_consensus::JustificationSyncLink<Block>,
 {
 	fn clone(&self) -> Self {
diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs
index 533ddb54e899da60a1c0621a2b433b2c7d347adc..5ee93d168643702387d0ee1529b8875d66b1a860 100644
--- a/substrate/client/consensus/slots/src/lib.rs
+++ b/substrate/client/consensus/slots/src/lib.rs
@@ -53,8 +53,7 @@ const LOG_TARGET: &str = "slots";
 /// The changes that need to applied to the storage to create the state for a block.
 ///
 /// See [`sp_state_machine::StorageChanges`] for more information.
-pub type StorageChanges<Transaction, Block> =
-	sp_state_machine::StorageChanges<Transaction, HashingFor<Block>>;
+pub type StorageChanges<Block> = sp_state_machine::StorageChanges<HashingFor<Block>>;
 
 /// The result of [`SlotWorker::on_slot`].
 #[derive(Debug, Clone)]
@@ -84,9 +83,7 @@ pub trait SlotWorker<B: BlockT, Proof> {
 #[async_trait::async_trait]
 pub trait SimpleSlotWorker<B: BlockT> {
 	/// A handle to a `BlockImport`.
-	type BlockImport: BlockImport<B, Transaction = <Self::Proposer as Proposer<B>>::Transaction>
-		+ Send
-		+ 'static;
+	type BlockImport: BlockImport<B> + Send + 'static;
 
 	/// A handle to a `SyncOracle`.
 	type SyncOracle: SyncOracle;
@@ -148,13 +145,10 @@ pub trait SimpleSlotWorker<B: BlockT> {
 		header: B::Header,
 		header_hash: &B::Hash,
 		body: Vec<B::Extrinsic>,
-		storage_changes: StorageChanges<<Self::BlockImport as BlockImport<B>>::Transaction, B>,
+		storage_changes: StorageChanges<B>,
 		public: Self::Claim,
 		aux_data: Self::AuxData,
-	) -> Result<
-		sc_consensus::BlockImportParams<B, <Self::BlockImport as BlockImport<B>>::Transaction>,
-		sp_consensus::Error,
-	>;
+	) -> Result<sc_consensus::BlockImportParams<B>, sp_consensus::Error>;
 
 	/// Whether to force authoring if offline.
 	fn force_authoring(&self) -> bool;
@@ -191,13 +185,7 @@ pub trait SimpleSlotWorker<B: BlockT> {
 		claim: &Self::Claim,
 		slot_info: SlotInfo<B>,
 		end_proposing_at: Instant,
-	) -> Option<
-		Proposal<
-			B,
-			<Self::Proposer as Proposer<B>>::Transaction,
-			<Self::Proposer as Proposer<B>>::Proof,
-		>,
-	> {
+	) -> Option<Proposal<B, <Self::Proposer as Proposer<B>>::Proof>> {
 		let slot = slot_info.slot;
 		let telemetry = self.telemetry();
 		let log_target = self.logging_target();
diff --git a/substrate/client/db/src/bench.rs b/substrate/client/db/src/bench.rs
index 9307a63ad444e4934892102ff78a26219fb245fc..38c37a42ede79659639268eb34e10316c8cbed15 100644
--- a/substrate/client/db/src/bench.rs
+++ b/substrate/client/db/src/bench.rs
@@ -32,8 +32,8 @@ use sp_runtime::{
 	StateVersion, Storage,
 };
 use sp_state_machine::{
-	backend::Backend as StateBackend, ChildStorageCollection, DBValue, IterArgs, StorageCollection,
-	StorageIterator, StorageKey, StorageValue,
+	backend::Backend as StateBackend, BackendTransaction, ChildStorageCollection, DBValue,
+	IterArgs, StorageCollection, StorageIterator, StorageKey, StorageValue,
 };
 use sp_trie::{
 	cache::{CacheSize, SharedTrieCache},
@@ -343,7 +343,6 @@ fn state_err() -> String {
 
 impl<B: BlockT> StateBackend<HashingFor<B>> for BenchmarkingState<B> {
 	type Error = <DbState<B> as StateBackend<HashingFor<B>>>::Error;
-	type Transaction = <DbState<B> as StateBackend<HashingFor<B>>>::Transaction;
 	type TrieBackendStorage = <DbState<B> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
 	type RawIter = RawIter<B>;
 
@@ -423,7 +422,7 @@ impl<B: BlockT> StateBackend<HashingFor<B>> for BenchmarkingState<B> {
 		&self,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (B::Hash, Self::Transaction) {
+	) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
 		self.state
 			.borrow()
 			.as_ref()
@@ -435,7 +434,7 @@ impl<B: BlockT> StateBackend<HashingFor<B>> for BenchmarkingState<B> {
 		child_info: &ChildInfo,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (B::Hash, bool, Self::Transaction) {
+	) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
 		self.state
 			.borrow()
 			.as_ref()
@@ -460,7 +459,7 @@ impl<B: BlockT> StateBackend<HashingFor<B>> for BenchmarkingState<B> {
 	fn commit(
 		&self,
 		storage_root: <HashingFor<B> as Hasher>::Out,
-		mut transaction: Self::Transaction,
+		mut transaction: BackendTransaction<HashingFor<B>>,
 		main_storage_changes: StorageCollection,
 		child_storage_changes: ChildStorageCollection,
 	) -> Result<(), Self::Error> {
diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs
index aba5b0829b5bb8dea1e6b68f7cc6173727ecb417..73fb4f8ce6db37f0857813d5b64821e77413b34b 100644
--- a/substrate/client/db/src/lib.rs
+++ b/substrate/client/db/src/lib.rs
@@ -86,9 +86,9 @@ use sp_runtime::{
 };
 use sp_state_machine::{
 	backend::{AsTrieBackend, Backend as StateBackend},
-	ChildStorageCollection, DBValue, IndexOperation, IterArgs, OffchainChangesCollection,
-	StateMachineStats, StorageCollection, StorageIterator, StorageKey, StorageValue,
-	UsageInfo as StateUsageInfo,
+	BackendTransaction, ChildStorageCollection, DBValue, IndexOperation, IterArgs,
+	OffchainChangesCollection, StateMachineStats, StorageCollection, StorageIterator, StorageKey,
+	StorageValue, UsageInfo as StateUsageInfo,
 };
 use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB};
 
@@ -187,7 +187,6 @@ impl<B: BlockT> StorageIterator<HashingFor<B>> for RawIter<B> {
 
 impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
 	type Error = <DbState<B> as StateBackend<HashingFor<B>>>::Error;
-	type Transaction = <DbState<B> as StateBackend<HashingFor<B>>>::Transaction;
 	type TrieBackendStorage = <DbState<B> as StateBackend<HashingFor<B>>>::TrieBackendStorage;
 	type RawIter = RawIter<B>;
 
@@ -243,7 +242,7 @@ impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
 		&self,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (B::Hash, Self::Transaction) {
+	) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
 		self.state.storage_root(delta, state_version)
 	}
 
@@ -252,7 +251,7 @@ impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
 		child_info: &ChildInfo,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (B::Hash, bool, Self::Transaction) {
+	) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
 		self.state.child_storage_root(child_info, delta, state_version)
 	}
 
diff --git a/substrate/client/db/src/record_stats_state.rs b/substrate/client/db/src/record_stats_state.rs
index 005315ce9f4585464bce62370f2cf5832f886f46..29ece84f97e5744c7fb0902d5f5fc98ba0eb7ced 100644
--- a/substrate/client/db/src/record_stats_state.rs
+++ b/substrate/client/db/src/record_stats_state.rs
@@ -26,7 +26,7 @@ use sp_runtime::{
 };
 use sp_state_machine::{
 	backend::{AsTrieBackend, Backend as StateBackend},
-	IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend,
+	BackendTransaction, IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend,
 };
 use std::sync::Arc;
 
@@ -109,7 +109,6 @@ impl<S: StateBackend<HashingFor<B>>, B: BlockT> StateBackend<HashingFor<B>>
 	for RecordStatsState<S, B>
 {
 	type Error = S::Error;
-	type Transaction = S::Transaction;
 	type TrieBackendStorage = S::TrieBackendStorage;
 	type RawIter = RawIter<S, B>;
 
@@ -173,7 +172,7 @@ impl<S: StateBackend<HashingFor<B>>, B: BlockT> StateBackend<HashingFor<B>>
 		&self,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (B::Hash, Self::Transaction) {
+	) -> (B::Hash, BackendTransaction<HashingFor<B>>) {
 		self.state.storage_root(delta, state_version)
 	}
 
@@ -182,7 +181,7 @@ impl<S: StateBackend<HashingFor<B>>, B: BlockT> StateBackend<HashingFor<B>>
 		child_info: &ChildInfo,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (B::Hash, bool, Self::Transaction) {
+	) -> (B::Hash, bool, BackendTransaction<HashingFor<B>>) {
 		self.state.child_storage_root(child_info, delta, state_version)
 	}
 
diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs
index 05ed3ddb798005a0f3f0a59097b6b1b9e5452e43..2a20da5a556b7398397cd9845acdc049961f0ce2 100644
--- a/substrate/client/network/test/src/lib.rs
+++ b/substrate/client/network/test/src/lib.rs
@@ -28,7 +28,6 @@ mod sync;
 
 use std::{
 	collections::HashMap,
-	marker::PhantomData,
 	pin::Pin,
 	sync::Arc,
 	task::{Context as FutureContext, Poll},
@@ -41,7 +40,7 @@ use log::trace;
 use parking_lot::Mutex;
 use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
 use sc_client_api::{
-	backend::{AuxStore, Backend, Finalizer, TransactionFor},
+	backend::{AuxStore, Backend, Finalizer},
 	BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification,
 	FinalityNotifications, ImportNotifications,
 };
@@ -117,8 +116,8 @@ impl PassThroughVerifier {
 impl<B: BlockT> Verifier<B> for PassThroughVerifier {
 	async fn verify(
 		&mut self,
-		mut block: BlockImportParams<B, ()>,
-	) -> Result<BlockImportParams<B, ()>, String> {
+		mut block: BlockImportParams<B>,
+	) -> Result<BlockImportParams<B>, String> {
 		if block.fork_choice.is_none() {
 			block.fork_choice = Some(ForkChoiceStrategy::LongestChain);
 		};
@@ -210,7 +209,6 @@ impl PeersClient {
 #[async_trait::async_trait]
 impl BlockImport<Block> for PeersClient {
 	type Error = ConsensusError;
-	type Transaction = ();
 
 	async fn check_block(
 		&mut self,
@@ -221,9 +219,9 @@ impl BlockImport<Block> for PeersClient {
 
 	async fn import_block(
 		&mut self,
-		block: BlockImportParams<Block, ()>,
+		block: BlockImportParams<Block>,
 	) -> Result<ImportResult, Self::Error> {
-		self.client.import_block(block.clear_storage_changes_and_mutate()).await
+		self.client.import_block(block).await
 	}
 }
 
@@ -248,7 +246,6 @@ pub struct Peer<D, BlockImport> {
 impl<D, B> Peer<D, B>
 where
 	B: BlockImport<Block, Error = ConsensusError> + Send + Sync,
-	B::Transaction: Send,
 {
 	/// Get this peer ID.
 	pub fn id(&self) -> PeerId {
@@ -556,24 +553,12 @@ where
 }
 
 pub trait BlockImportAdapterFull:
-	BlockImport<
-		Block,
-		Transaction = TransactionFor<substrate_test_runtime_client::Backend, Block>,
-		Error = ConsensusError,
-	> + Send
-	+ Sync
-	+ Clone
+	BlockImport<Block, Error = ConsensusError> + Send + Sync + Clone
 {
 }
 
 impl<T> BlockImportAdapterFull for T where
-	T: BlockImport<
-			Block,
-			Transaction = TransactionFor<substrate_test_runtime_client::Backend, Block>,
-			Error = ConsensusError,
-		> + Send
-		+ Sync
-		+ Clone
+	T: BlockImport<Block, Error = ConsensusError> + Send + Sync + Clone
 {
 }
 
@@ -583,27 +568,23 @@ impl<T> BlockImportAdapterFull for T where
 /// This is required as the `TestNetFactory` trait does not distinguish between
 /// full and light nodes.
 #[derive(Clone)]
-pub struct BlockImportAdapter<I, Transaction = ()> {
+pub struct BlockImportAdapter<I> {
 	inner: I,
-	_phantom: PhantomData<Transaction>,
 }
 
-impl<I, Transaction> BlockImportAdapter<I, Transaction> {
+impl<I> BlockImportAdapter<I> {
 	/// Create a new instance of `Self::Full`.
 	pub fn new(inner: I) -> Self {
-		Self { inner, _phantom: PhantomData }
+		Self { inner }
 	}
 }
 
 #[async_trait::async_trait]
-impl<I, Transaction> BlockImport<Block> for BlockImportAdapter<I, Transaction>
+impl<I> BlockImport<Block> for BlockImportAdapter<I>
 where
 	I: BlockImport<Block, Error = ConsensusError> + Send + Sync,
-	I::Transaction: Send,
-	Transaction: Send + 'static,
 {
 	type Error = ConsensusError;
-	type Transaction = Transaction;
 
 	async fn check_block(
 		&mut self,
@@ -614,9 +595,9 @@ where
 
 	async fn import_block(
 		&mut self,
-		block: BlockImportParams<Block, Self::Transaction>,
+		block: BlockImportParams<Block>,
 	) -> Result<ImportResult, Self::Error> {
-		self.inner.import_block(block.clear_storage_changes_and_mutate()).await
+		self.inner.import_block(block).await
 	}
 }
 
@@ -630,8 +611,8 @@ struct VerifierAdapter<B: BlockT> {
 impl<B: BlockT> Verifier<B> for VerifierAdapter<B> {
 	async fn verify(
 		&mut self,
-		block: BlockImportParams<B, ()>,
-	) -> Result<BlockImportParams<B, ()>, String> {
+		block: BlockImportParams<B>,
+	) -> Result<BlockImportParams<B>, String> {
 		let hash = block.header.hash();
 		self.verifier.lock().await.verify(block).await.map_err(|e| {
 			self.failed_verifications.lock().insert(hash, e.clone());
@@ -714,10 +695,7 @@ pub struct FullPeerConfig {
 }
 
 #[async_trait::async_trait]
-pub trait TestNetFactory: Default + Sized + Send
-where
-	<Self::BlockImport as BlockImport<Block>>::Transaction: Send,
-{
+pub trait TestNetFactory: Default + Sized + Send {
 	type Verifier: 'static + Verifier<Block>;
 	type BlockImport: BlockImport<Block, Error = ConsensusError> + Clone + Send + Sync + 'static;
 	type PeerData: Default + Send;
diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs
index e2a9cb5f3bafd843aa5a7d0f5dbfda0d50af8a52..68e780545bb173775fc49268a704e0e37b2fa8c3 100644
--- a/substrate/client/network/test/src/service.rs
+++ b/substrate/client/network/test/src/service.rs
@@ -135,8 +135,8 @@ impl TestNetworkBuilder {
 		impl<B: BlockT> sc_consensus::Verifier<B> for PassThroughVerifier {
 			async fn verify(
 				&mut self,
-				mut block: sc_consensus::BlockImportParams<B, ()>,
-			) -> Result<sc_consensus::BlockImportParams<B, ()>, String> {
+				mut block: sc_consensus::BlockImportParams<B>,
+			) -> Result<sc_consensus::BlockImportParams<B>, String> {
 				block.finalized = self.0;
 				block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain);
 				Ok(block)
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs
index 54c585932a744017c8ce6b38184fcfacf94f1b9e..6e92e87608b44abab9a4133e456e4f4f7272819c 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs
@@ -203,10 +203,7 @@ impl<
 impl<Block: BlockT, Client: CallApiAt<Block>> CallApiAt<Block> for ChainHeadMockClient<Client> {
 	type StateBackend = <Client as CallApiAt<Block>>::StateBackend;
 
-	fn call_api_at(
-		&self,
-		params: CallApiAtParams<Block, <Client as CallApiAt<Block>>::StateBackend>,
-	) -> Result<Vec<u8>, sp_api::ApiError> {
+	fn call_api_at(&self, params: CallApiAtParams<Block>) -> Result<Vec<u8>, sp_api::ApiError> {
 		self.client.call_api_at(params)
 	}
 
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index b942ac58aa99b678218df460532c0cb47efbb5b0..fe18d1d002d56c9a6ab7ae6a3175be488f660c90 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -380,7 +380,7 @@ where
 	<TCl as ProvideRuntimeApi<TBl>>::Api: sp_api::Metadata<TBl>
 		+ sp_transaction_pool::runtime_api::TaggedTransactionQueue<TBl>
 		+ sp_session::SessionKeys<TBl>
-		+ sp_api::ApiExt<TBl, StateBackend = TBackend::State>,
+		+ sp_api::ApiExt<TBl>,
 	TBl: BlockT,
 	TBl::Hash: Unpin,
 	TBl::Header: Unpin,
diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs
index facde72321db4ab648e895778beacb09992701f2..86b5c7c61fcd2cbe062784fe53f8e9ba30adf6e0 100644
--- a/substrate/client/service/src/client/call_executor.rs
+++ b/substrate/client/service/src/client/call_executor.rs
@@ -21,10 +21,13 @@ use sc_client_api::{
 	backend, call_executor::CallExecutor, execution_extensions::ExecutionExtensions, HeaderBackend,
 };
 use sc_executor::{RuntimeVersion, RuntimeVersionOf};
-use sp_api::{ProofRecorder, StorageTransactionCache};
+use sp_api::ProofRecorder;
 use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode};
 use sp_externalities::Extensions;
-use sp_runtime::{generic::BlockId, traits::Block as BlockT};
+use sp_runtime::{
+	generic::BlockId,
+	traits::{Block as BlockT, HashingFor},
+};
 use sp_state_machine::{backend::AsTrieBackend, Ext, OverlayedChanges, StateMachine, StorageProof};
 use std::{cell::RefCell, sync::Arc};
 
@@ -119,8 +122,7 @@ where
 	) -> sp_blockchain::Result<RuntimeVersion> {
 		let mut overlay = OverlayedChanges::default();
 
-		let mut cache = StorageTransactionCache::<Block, B::State>::default();
-		let mut ext = Ext::new(&mut overlay, &mut cache, state, None);
+		let mut ext = Ext::new(&mut overlay, state, None);
 
 		self.executor
 			.runtime_version(&mut ext, code)
@@ -197,14 +199,11 @@ where
 		at_hash: Block::Hash,
 		method: &str,
 		call_data: &[u8],
-		changes: &RefCell<OverlayedChanges>,
-		storage_transaction_cache: Option<&RefCell<StorageTransactionCache<Block, B::State>>>,
+		changes: &RefCell<OverlayedChanges<HashingFor<Block>>>,
 		recorder: &Option<ProofRecorder<Block>>,
 		call_context: CallContext,
 		extensions: &RefCell<Extensions>,
 	) -> Result<Vec<u8>, sp_blockchain::Error> {
-		let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut());
-
 		let state = self.backend.state_at(at_hash)?;
 
 		let changes = &mut *changes.borrow_mut();
@@ -237,7 +236,6 @@ where
 					&runtime_code,
 					call_context,
 				)
-				.with_storage_transaction_cache(storage_transaction_cache.as_deref_mut())
 				.set_parent_hash(at_hash);
 				state_machine.execute()
 			},
@@ -252,7 +250,6 @@ where
 					&runtime_code,
 					call_context,
 				)
-				.with_storage_transaction_cache(storage_transaction_cache.as_deref_mut())
 				.set_parent_hash(at_hash);
 				state_machine.execute()
 			},
diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs
index d0a46ab2c01183cd3ea17499a2000c9849300ad1..a0983d823e5b19a5758e281e91499085f9d813da 100644
--- a/substrate/client/service/src/client/client.rs
+++ b/substrate/client/service/src/client/client.rs
@@ -148,9 +148,9 @@ impl<H> PrePostHeader<H> {
 	}
 }
 
-enum PrepareStorageChangesResult<B: backend::Backend<Block>, Block: BlockT> {
+enum PrepareStorageChangesResult<Block: BlockT> {
 	Discard(ImportResult),
-	Import(Option<sc_consensus::StorageChanges<Block, backend::TransactionFor<B, Block>>>),
+	Import(Option<sc_consensus::StorageChanges<Block>>),
 }
 
 /// Create an instance of in-memory client.
@@ -489,15 +489,12 @@ where
 	fn apply_block(
 		&self,
 		operation: &mut ClientImportOperation<Block, B>,
-		import_block: BlockImportParams<Block, backend::TransactionFor<B, Block>>,
-		storage_changes: Option<
-			sc_consensus::StorageChanges<Block, backend::TransactionFor<B, Block>>,
-		>,
+		import_block: BlockImportParams<Block>,
+		storage_changes: Option<sc_consensus::StorageChanges<Block>>,
 	) -> sp_blockchain::Result<ImportResult>
 	where
 		Self: ProvideRuntimeApi<Block>,
-		<Self as ProvideRuntimeApi<Block>>::Api:
-			CoreApi<Block> + ApiExt<Block, StateBackend = B::State>,
+		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
 	{
 		let BlockImportParams {
 			origin,
@@ -580,9 +577,7 @@ where
 		justifications: Option<Justifications>,
 		body: Option<Vec<Block::Extrinsic>>,
 		indexed_body: Option<Vec<Vec<u8>>>,
-		storage_changes: Option<
-			sc_consensus::StorageChanges<Block, backend::TransactionFor<B, Block>>,
-		>,
+		storage_changes: Option<sc_consensus::StorageChanges<Block>>,
 		finalized: bool,
 		aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
 		fork_choice: ForkChoiceStrategy,
@@ -590,8 +585,7 @@ where
 	) -> sp_blockchain::Result<ImportResult>
 	where
 		Self: ProvideRuntimeApi<Block>,
-		<Self as ProvideRuntimeApi<Block>>::Api:
-			CoreApi<Block> + ApiExt<Block, StateBackend = B::State>,
+		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
 	{
 		let parent_hash = *import_headers.post().parent_hash();
 		let status = self.backend.blockchain().status(hash)?;
@@ -830,12 +824,11 @@ where
 	/// provided, the block is re-executed to get the storage changes.
 	fn prepare_block_storage_changes(
 		&self,
-		import_block: &mut BlockImportParams<Block, backend::TransactionFor<B, Block>>,
-	) -> sp_blockchain::Result<PrepareStorageChangesResult<B, Block>>
+		import_block: &mut BlockImportParams<Block>,
+	) -> sp_blockchain::Result<PrepareStorageChangesResult<Block>>
 	where
 		Self: ProvideRuntimeApi<Block>,
-		<Self as ProvideRuntimeApi<Block>>::Api:
-			CoreApi<Block> + ApiExt<Block, StateBackend = B::State>,
+		<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
 	{
 		let parent_hash = import_block.header.parent_hash();
 		let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip);
@@ -1418,8 +1411,7 @@ where
 	E: CallExecutor<Block> + Send + Sync + 'static,
 	Block: BlockT,
 	Self: ChainHeaderBackend<Block> + ProvideRuntimeApi<Block>,
-	<Self as ProvideRuntimeApi<Block>>::Api:
-		ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>> + BlockBuilderApi<Block>,
+	<Self as ProvideRuntimeApi<Block>>::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 {
 	fn new_block_at<R: Into<RecordProof>>(
 		&self,
@@ -1705,17 +1697,13 @@ where
 {
 	type StateBackend = B::State;
 
-	fn call_api_at(
-		&self,
-		params: CallApiAtParams<Block, B::State>,
-	) -> Result<Vec<u8>, sp_api::ApiError> {
+	fn call_api_at(&self, params: CallApiAtParams<Block>) -> Result<Vec<u8>, sp_api::ApiError> {
 		self.executor
 			.contextual_call(
 				params.at,
 				params.function,
 				&params.arguments,
 				params.overlayed_changes,
-				Some(params.storage_transaction_cache),
 				params.recorder,
 				params.call_context,
 				params.extensions,
@@ -1754,13 +1742,10 @@ where
 	E: CallExecutor<Block> + Send + Sync,
 	Block: BlockT,
 	Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
-	<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api:
-		CoreApi<Block> + ApiExt<Block, StateBackend = B::State>,
+	<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
 	RA: Sync + Send,
-	backend::TransactionFor<B, Block>: Send + 'static,
 {
 	type Error = ConsensusError;
-	type Transaction = backend::TransactionFor<B, Block>;
 
 	/// Import a checked and validated block. If a justification is provided in
 	/// `BlockImportParams` then `finalized` *must* be true.
@@ -1773,7 +1758,7 @@ where
 	/// algorithm, don't use this function.
 	async fn import_block(
 		&mut self,
-		mut import_block: BlockImportParams<Block, backend::TransactionFor<B, Block>>,
+		mut import_block: BlockImportParams<Block>,
 	) -> Result<ImportResult, Self::Error> {
 		let span = tracing::span!(tracing::Level::DEBUG, "import_block");
 		let _enter = span.enter();
@@ -1867,17 +1852,14 @@ where
 	E: CallExecutor<Block> + Send + Sync,
 	Block: BlockT,
 	Self: ProvideRuntimeApi<Block>,
-	<Self as ProvideRuntimeApi<Block>>::Api:
-		CoreApi<Block> + ApiExt<Block, StateBackend = B::State>,
+	<Self as ProvideRuntimeApi<Block>>::Api: CoreApi<Block> + ApiExt<Block>,
 	RA: Sync + Send,
-	backend::TransactionFor<B, Block>: Send + 'static,
 {
 	type Error = ConsensusError;
-	type Transaction = backend::TransactionFor<B, Block>;
 
 	async fn import_block(
 		&mut self,
-		import_block: BlockImportParams<Block, Self::Transaction>,
+		import_block: BlockImportParams<Block>,
 	) -> Result<ImportResult, Self::Error> {
 		(&*self).import_block(import_block).await
 	}
diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs
index 4cbbe17a9968b47da05bc96e0229d22bb2114a9b..0c416c73766c8b2f58ca4f9ffbd9ccc93b1903e0 100644
--- a/substrate/frame/support/src/lib.rs
+++ b/substrate/frame/support/src/lib.rs
@@ -170,8 +170,8 @@ impl TypeId for PalletId {
 /// It requires that the given prefix type implements [`Get<'static str>`](traits::Get).
 ///
 /// 4. Let the macro "guess" what kind of prefix type to use. This only supports verbatim or
-///    pallet name. The macro uses the presence of generic arguments to the prefix type as
-///    an indication that it should use the pallet name as the `prefix`:
+///    pallet name. The macro uses the presence of generic arguments to the prefix type as an
+///    indication that it should use the pallet name as the `prefix`:
 #[doc = docify::embed!("src/tests/storage_alias.rs", storage_alias_guess)]
 pub use frame_support_procedural::storage_alias;
 
diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
index cb683b664495c71b3b83f35511a1e72ede6a9dad..66bc5b0e9e5e3957db582ad61fd73a966b5841e6 100644
--- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
+++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
@@ -229,10 +229,7 @@ fn generate_runtime_api_base_structures() -> Result<TokenStream> {
 			pub struct RuntimeApiImpl<Block: #crate_::BlockT, C: #crate_::CallApiAt<Block> + 'static> {
 				call: &'static C,
 				transaction_depth: std::cell::RefCell<u16>,
-				changes: std::cell::RefCell<#crate_::OverlayedChanges>,
-				storage_transaction_cache: std::cell::RefCell<
-					#crate_::StorageTransactionCache<Block, C::StateBackend>
-				>,
+				changes: std::cell::RefCell<#crate_::OverlayedChanges<#crate_::HashingFor<Block>>>,
 				recorder: std::option::Option<#crate_::ProofRecorder<Block>>,
 				call_context: #crate_::CallContext,
 				extensions: std::cell::RefCell<#crate_::Extensions>,
@@ -242,8 +239,6 @@ fn generate_runtime_api_base_structures() -> Result<TokenStream> {
 			impl<Block: #crate_::BlockT, C: #crate_::CallApiAt<Block>> #crate_::ApiExt<Block> for
 				RuntimeApiImpl<Block, C>
 			{
-				type StateBackend = C::StateBackend;
-
 				fn execute_in_transaction<F: FnOnce(&Self) -> #crate_::TransactionOutcome<R>, R>(
 					&self,
 					call: F,
@@ -305,22 +300,21 @@ fn generate_runtime_api_base_structures() -> Result<TokenStream> {
 					})
 				}
 
-				fn into_storage_changes(
+				fn into_storage_changes<B: #crate_::StateBackend<#crate_::HashingFor<Block>>>(
 					&self,
-					backend: &Self::StateBackend,
+					backend: &B,
 					parent_hash: Block::Hash,
 				) -> core::result::Result<
-					#crate_::StorageChanges<C::StateBackend, Block>,
+					#crate_::StorageChanges<Block>,
 				String
 					> where Self: Sized {
 						let state_version = #crate_::CallApiAt::<Block>::runtime_version_at(self.call, std::clone::Clone::clone(&parent_hash))
 							.map(|v| #crate_::RuntimeVersion::state_version(&v))
 							.map_err(|e| format!("Failed to get state version: {}", e))?;
 
-						#crate_::OverlayedChanges::into_storage_changes(
-							std::cell::RefCell::take(&self.changes),
+						#crate_::OverlayedChanges::drain_storage_changes(
+							&mut std::cell::RefCell::borrow_mut(&self.changes),
 							backend,
-							core::cell::RefCell::take(&self.storage_transaction_cache),
 							state_version,
 						)
 					}
@@ -349,7 +343,6 @@ fn generate_runtime_api_base_structures() -> Result<TokenStream> {
 						transaction_depth: 0.into(),
 						changes: std::default::Default::default(),
 						recorder: std::default::Default::default(),
-						storage_transaction_cache: std::default::Default::default(),
 						call_context: #crate_::CallContext::Offchain,
 						extensions: std::default::Default::default(),
 						extensions_generated_for: std::default::Default::default(),
@@ -535,7 +528,6 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> {
 						function: (*fn_name)(version),
 						arguments: params,
 						overlayed_changes: &self.changes,
-						storage_transaction_cache: &self.storage_transaction_cache,
 						call_context: self.call_context,
 						recorder: &self.recorder,
 						extensions: &self.extensions,
diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
index 0a1a89c6dacb54fe64eb5e0483a1d7997389e091..c1339ff6621b389e32c2f9ca2a7f19079c2d269e 100644
--- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
+++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
@@ -66,8 +66,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result<To
 
 	Ok(quote!(
 		impl #crate_::ApiExt<#block_type> for #self_ty {
-			type StateBackend = #crate_::InMemoryBackend<#crate_::HashingFor<#block_type>>;
-
 			fn execute_in_transaction<F: FnOnce(&Self) -> #crate_::TransactionOutcome<R>, R>(
 				&self,
 				call: F,
@@ -111,12 +109,12 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result<To
 				unimplemented!("`proof_recorder` not implemented for runtime api mocks")
 			}
 
-			fn into_storage_changes(
+			fn into_storage_changes<B: #crate_::StateBackend<#crate_::HashingFor<#block_type>>>(
 				&self,
-				_: &Self::StateBackend,
+				_: &B,
 				_: <#block_type as #crate_::BlockT>::Hash,
 			) -> std::result::Result<
-				#crate_::StorageChanges<Self::StateBackend, #block_type>,
+				#crate_::StorageChanges<#block_type>,
 				String
 			> where Self: Sized {
 				unimplemented!("`into_storage_changes` not implemented for runtime api mocks")
diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs
index 2cc20dcb356a80bc1a40f9bb5512825bdee9fb85..e575f6b9bbff5f12032582a570f2d82c17ea5b67 100644
--- a/substrate/primitives/api/src/lib.rs
+++ b/substrate/primitives/api/src/lib.rs
@@ -456,28 +456,8 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis;
 #[cfg(feature = "std")]
 pub type ProofRecorder<B> = sp_trie::recorder::Recorder<HashingFor<B>>;
 
-/// A type that is used as cache for the storage transactions.
 #[cfg(feature = "std")]
-pub type StorageTransactionCache<Block, Backend> = sp_state_machine::StorageTransactionCache<
-	<Backend as StateBackend<HashingFor<Block>>>::Transaction,
-	HashingFor<Block>,
->;
-
-#[cfg(feature = "std")]
-pub type StorageChanges<SBackend, Block> = sp_state_machine::StorageChanges<
-	<SBackend as StateBackend<HashingFor<Block>>>::Transaction,
-	HashingFor<Block>,
->;
-
-/// Extract the state backend type for a type that implements `ProvideRuntimeApi`.
-#[cfg(feature = "std")]
-pub type StateBackendFor<P, Block> =
-	<<P as ProvideRuntimeApi<Block>>::Api as ApiExt<Block>>::StateBackend;
-
-/// Extract the state backend transaction type for a type that implements `ProvideRuntimeApi`.
-#[cfg(feature = "std")]
-pub type TransactionFor<P, Block> =
-	<StateBackendFor<P, Block> as StateBackend<HashingFor<Block>>>::Transaction;
+pub type StorageChanges<Block> = sp_state_machine::StorageChanges<HashingFor<Block>>;
 
 /// Something that can be constructed to a runtime api.
 #[cfg(feature = "std")]
@@ -531,9 +511,6 @@ pub enum ApiError {
 /// Extends the runtime api implementation with some common functionality.
 #[cfg(feature = "std")]
 pub trait ApiExt<Block: BlockT> {
-	/// The state backend that is used to store the block states.
-	type StateBackend: StateBackend<HashingFor<Block>>;
-
 	/// Execute the given closure inside a new transaction.
 	///
 	/// Depending on the outcome of the closure, the transaction is committed or rolled-back.
@@ -582,11 +559,11 @@ pub trait ApiExt<Block: BlockT> {
 	/// api functions.
 	///
 	/// After executing this function, all collected changes are reset.
-	fn into_storage_changes(
+	fn into_storage_changes<B: StateBackend<HashingFor<Block>>>(
 		&self,
-		backend: &Self::StateBackend,
+		backend: &B,
 		parent_hash: Block::Hash,
-	) -> Result<StorageChanges<Self::StateBackend, Block>, String>
+	) -> Result<StorageChanges<Block>, String>
 	where
 		Self: Sized;
 
@@ -599,7 +576,7 @@ pub trait ApiExt<Block: BlockT> {
 
 /// Parameters for [`CallApiAt::call_api_at`].
 #[cfg(feature = "std")]
-pub struct CallApiAtParams<'a, Block: BlockT, Backend: StateBackend<HashingFor<Block>>> {
+pub struct CallApiAtParams<'a, Block: BlockT> {
 	/// The block id that determines the state that should be setup when calling the function.
 	pub at: Block::Hash,
 	/// The name of the function that should be called.
@@ -607,9 +584,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, Backend: StateBackend<HashingFor<B
 	/// The encoded arguments of the function.
 	pub arguments: Vec<u8>,
 	/// The overlayed changes that are on top of the state.
-	pub overlayed_changes: &'a RefCell<OverlayedChanges>,
-	/// The cache for storage transactions.
-	pub storage_transaction_cache: &'a RefCell<StorageTransactionCache<Block, Backend>>,
+	pub overlayed_changes: &'a RefCell<OverlayedChanges<HashingFor<Block>>>,
 	/// The call context of this call.
 	pub call_context: CallContext,
 	/// The optional proof recorder for recording storage accesses.
@@ -626,10 +601,7 @@ pub trait CallApiAt<Block: BlockT> {
 
 	/// Calls the given api function with the given encoded arguments at the given block and returns
 	/// the encoded result.
-	fn call_api_at(
-		&self,
-		params: CallApiAtParams<Block, Self::StateBackend>,
-	) -> Result<Vec<u8>, ApiError>;
+	fn call_api_at(&self, params: CallApiAtParams<Block>) -> Result<Vec<u8>, ApiError>;
 
 	/// Returns the runtime version at the given block.
 	fn runtime_version_at(&self, at_hash: Block::Hash) -> Result<RuntimeVersion, ApiError>;
diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs
index 2203110437c822590b9fabf283ab18665b45a9e1..6505d005deb8df42c04252be85afd7e7b55924b9 100644
--- a/substrate/primitives/consensus/common/src/lib.rs
+++ b/substrate/primitives/consensus/common/src/lib.rs
@@ -91,13 +91,13 @@ pub trait Environment<B: BlockT> {
 }
 
 /// A proposal that is created by a [`Proposer`].
-pub struct Proposal<Block: BlockT, Transaction, Proof> {
+pub struct Proposal<Block: BlockT, Proof> {
 	/// The block that was build.
 	pub block: Block,
 	/// Proof that was recorded while building the block.
 	pub proof: Proof,
 	/// The storage changes while building this block.
-	pub storage_changes: sp_state_machine::StorageChanges<Transaction, HashingFor<Block>>,
+	pub storage_changes: sp_state_machine::StorageChanges<HashingFor<Block>>,
 }
 
 /// Error that is returned when [`ProofRecording`] requested to record a proof,
@@ -177,10 +177,8 @@ mod private {
 pub trait Proposer<B: BlockT> {
 	/// Error type which can occur when proposing or evaluating.
 	type Error: From<Error> + std::error::Error + 'static;
-	/// The transaction type used by the backend.
-	type Transaction: Default + Send + 'static;
 	/// Future that resolves to a committed proposal with an optional proof.
-	type Proposal: Future<Output = Result<Proposal<B, Self::Transaction, Self::Proof>, Self::Error>>
+	type Proposal: Future<Output = Result<Proposal<B, Self::Proof>, Self::Error>>
 		+ Send
 		+ Unpin
 		+ 'static;
diff --git a/substrate/primitives/state-machine/src/backend.rs b/substrate/primitives/state-machine/src/backend.rs
index f3244308a54cf624ec393b7c9f5775c4f7367374..2a25bdc54d9491faaf500f760aae7550dd1dadd3 100644
--- a/substrate/primitives/state-machine/src/backend.rs
+++ b/substrate/primitives/state-machine/src/backend.rs
@@ -30,6 +30,7 @@ use sp_core::storage::{ChildInfo, StateVersion, TrackedStorageKey};
 #[cfg(feature = "std")]
 use sp_core::traits::RuntimeCode;
 use sp_std::vec::Vec;
+use sp_trie::PrefixedMemoryDB;
 
 /// A struct containing arguments for iterating over the storage.
 #[derive(Default)]
@@ -168,6 +169,12 @@ where
 	}
 }
 
+/// The transaction type used by [`Backend`].
+///
+/// This transaction contains all the changes that need to be applied to the backend to create the
+/// state for a new block.
+pub type BackendTransaction<H> = PrefixedMemoryDB<H>;
+
 /// A state backend is used to read state data and can have changes committed
 /// to it.
 ///
@@ -176,11 +183,8 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
 	/// An error type when fetching data is not possible.
 	type Error: super::Error;
 
-	/// Storage changes to be applied if committing
-	type Transaction: Consolidate + Default + Send;
-
 	/// Type of trie backend storage.
-	type TrieBackendStorage: TrieBackendStorage<H, Overlay = Self::Transaction>;
+	type TrieBackendStorage: TrieBackendStorage<H>;
 
 	/// Type of the raw storage iterator.
 	type RawIter: StorageIterator<H, Backend = Self, Error = Self::Error>;
@@ -236,7 +240,7 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
 		&self,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (H::Out, Self::Transaction)
+	) -> (H::Out, BackendTransaction<H>)
 	where
 		H::Out: Ord;
 
@@ -248,7 +252,7 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
 		child_info: &ChildInfo,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (H::Out, bool, Self::Transaction)
+	) -> (H::Out, bool, BackendTransaction<H>)
 	where
 		H::Out: Ord;
 
@@ -283,11 +287,11 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
 			Item = (&'a ChildInfo, impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>),
 		>,
 		state_version: StateVersion,
-	) -> (H::Out, Self::Transaction)
+	) -> (H::Out, BackendTransaction<H>)
 	where
 		H::Out: Ord + Encode,
 	{
-		let mut txs: Self::Transaction = Default::default();
+		let mut txs = BackendTransaction::default();
 		let mut child_roots: Vec<_> = Default::default();
 		// child first
 		for (child_info, child_delta) in child_deltas {
@@ -308,6 +312,7 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
 			state_version,
 		);
 		txs.consolidate(parent_txs);
+
 		(root, txs)
 	}
 
@@ -331,7 +336,7 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
 	fn commit(
 		&self,
 		_: H::Out,
-		_: Self::Transaction,
+		_: BackendTransaction<H>,
 		_: StorageCollection,
 		_: ChildStorageCollection,
 	) -> Result<(), Self::Error> {
@@ -377,34 +382,6 @@ pub trait AsTrieBackend<H: Hasher, C = sp_trie::cache::LocalTrieCache<H>> {
 	fn as_trie_backend(&self) -> &TrieBackend<Self::TrieBackendStorage, H, C>;
 }
 
-/// Trait that allows consolidate two transactions together.
-pub trait Consolidate {
-	/// Consolidate two transactions into one.
-	fn consolidate(&mut self, other: Self);
-}
-
-impl Consolidate for () {
-	fn consolidate(&mut self, _: Self) {
-		()
-	}
-}
-
-impl Consolidate for Vec<(Option<ChildInfo>, StorageCollection)> {
-	fn consolidate(&mut self, mut other: Self) {
-		self.append(&mut other);
-	}
-}
-
-impl<H, KF> Consolidate for sp_trie::GenericMemoryDB<H, KF>
-where
-	H: Hasher,
-	KF: sp_trie::KeyFunction<H>,
-{
-	fn consolidate(&mut self, other: Self) {
-		sp_trie::GenericMemoryDB::consolidate(self, other)
-	}
-}
-
 /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`].
 #[cfg(feature = "std")]
 pub struct BackendRuntimeCode<'a, B, H> {
diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs
index a7adbc8a0daeecce668e6d70f6cc6f3f8ed96cb1..ace88aee2628f556ebafffa56543a7400c9428aa 100644
--- a/substrate/primitives/state-machine/src/basic.rs
+++ b/substrate/primitives/state-machine/src/basic.rs
@@ -29,7 +29,7 @@ use sp_core::{
 	Blake2Hasher,
 };
 use sp_externalities::{Extension, Extensions, MultiRemovalResults};
-use sp_trie::{empty_child_trie_root, HashKey, LayoutV0, LayoutV1, TrieConfiguration};
+use sp_trie::{empty_child_trie_root, LayoutV0, LayoutV1, TrieConfiguration};
 use std::{
 	any::{Any, TypeId},
 	collections::BTreeMap,
@@ -39,7 +39,7 @@ use std::{
 /// Simple Map-based Externalities impl.
 #[derive(Debug)]
 pub struct BasicExternalities {
-	overlay: OverlayedChanges,
+	overlay: OverlayedChanges<Blake2Hasher>,
 	extensions: Extensions,
 }
 
@@ -282,7 +282,7 @@ impl Externalities for BasicExternalities {
 		if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) {
 			let delta =
 				data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice())));
-			crate::in_memory_backend::new_in_mem::<Blake2Hasher, HashKey<_>>()
+			crate::in_memory_backend::new_in_mem::<Blake2Hasher>()
 				.child_storage_root(&child_info, delta, state_version)
 				.0
 		} else {
diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs
index 3c088a2176582ec2a5d4c23036740ff8b0f4d274..11df46f2a4a3a901753c10a5acf1b6535e61f8ea 100644
--- a/substrate/primitives/state-machine/src/ext.rs
+++ b/substrate/primitives/state-machine/src/ext.rs
@@ -22,7 +22,7 @@ use crate::overlayed_changes::OverlayedExtensions;
 use crate::{
 	backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue,
 };
-use codec::{Decode, Encode, EncodeAppend};
+use codec::{Encode, EncodeAppend};
 use hash_db::Hasher;
 #[cfg(feature = "std")]
 use sp_core::hexdisplay::HexDisplay;
@@ -30,9 +30,8 @@ use sp_core::storage::{
 	well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey,
 };
 use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults};
-use sp_trie::{empty_child_trie_root, LayoutV1};
 
-use crate::{log_error, trace, warn, StorageTransactionCache};
+use crate::{log_error, trace, warn};
 use sp_std::{
 	any::{Any, TypeId},
 	boxed::Box,
@@ -98,11 +97,9 @@ where
 	B: 'a + Backend<H>,
 {
 	/// The overlayed changes to write to.
-	overlay: &'a mut OverlayedChanges,
+	overlay: &'a mut OverlayedChanges<H>,
 	/// The storage backend to read from.
 	backend: &'a B,
-	/// The cache for the storage transactions.
-	storage_transaction_cache: &'a mut StorageTransactionCache<B::Transaction, H>,
 	/// Pseudo-unique id used for tracing.
 	pub id: u16,
 	/// Extensions registered with this instance.
@@ -117,37 +114,24 @@ where
 {
 	/// Create a new `Ext`.
 	#[cfg(not(feature = "std"))]
-	pub fn new(
-		overlay: &'a mut OverlayedChanges,
-		storage_transaction_cache: &'a mut StorageTransactionCache<B::Transaction, H>,
-		backend: &'a B,
-	) -> Self {
-		Ext { overlay, backend, id: 0, storage_transaction_cache }
+	pub fn new(overlay: &'a mut OverlayedChanges<H>, backend: &'a B) -> Self {
+		Ext { overlay, backend, id: 0 }
 	}
 
 	/// Create a new `Ext` from overlayed changes and read-only backend
 	#[cfg(feature = "std")]
 	pub fn new(
-		overlay: &'a mut OverlayedChanges,
-		storage_transaction_cache: &'a mut StorageTransactionCache<B::Transaction, H>,
+		overlay: &'a mut OverlayedChanges<H>,
 		backend: &'a B,
 		extensions: Option<&'a mut sp_externalities::Extensions>,
 	) -> Self {
 		Self {
 			overlay,
 			backend,
-			storage_transaction_cache,
 			id: rand::random(),
 			extensions: extensions.map(OverlayedExtensions::new),
 		}
 	}
-
-	/// Invalidates the currently cached storage root and the db transaction.
-	///
-	/// Called when there are changes that likely will invalidate the storage root.
-	fn mark_dirty(&mut self) {
-		self.storage_transaction_cache.reset();
-	}
 }
 
 #[cfg(test)]
@@ -412,7 +396,6 @@ where
 			),
 		);
 
-		self.mark_dirty();
 		self.overlay.set_storage(key, value);
 	}
 
@@ -432,7 +415,6 @@ where
 		);
 		let _guard = guard();
 
-		self.mark_dirty();
 		self.overlay.set_child_storage(child_info, key, value);
 	}
 
@@ -449,7 +431,6 @@ where
 			child_info = %HexDisplay::from(&child_info.storage_key()),
 		);
 		let _guard = guard();
-		self.mark_dirty();
 		let overlay = self.overlay.clear_child_storage(child_info);
 		let (maybe_cursor, backend, loops) =
 			self.limit_remove_from_backend(Some(child_info), None, maybe_limit, maybe_cursor);
@@ -478,7 +459,6 @@ where
 			return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 }
 		}
 
-		self.mark_dirty();
 		let overlay = self.overlay.clear_prefix(prefix);
 		let (maybe_cursor, backend, loops) =
 			self.limit_remove_from_backend(None, Some(prefix), maybe_limit, maybe_cursor);
@@ -501,7 +481,6 @@ where
 		);
 		let _guard = guard();
 
-		self.mark_dirty();
 		let overlay = self.overlay.clear_child_prefix(child_info, prefix);
 		let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(
 			Some(child_info),
@@ -522,7 +501,6 @@ where
 		);
 
 		let _guard = guard();
-		self.mark_dirty();
 
 		let backend = &mut self.backend;
 		let current_value = self.overlay.value_mut_or_insert_with(&key, || {
@@ -533,27 +511,17 @@ where
 
 	fn storage_root(&mut self, state_version: StateVersion) -> Vec<u8> {
 		let _guard = guard();
-		if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root {
-			trace!(
-				target: "state",
-				method = "StorageRoot",
-				ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
-				storage_root = %HexDisplay::from(&root.as_ref()),
-				cached = true,
-			);
-			return root.encode()
-		}
 
-		let root =
-			self.overlay
-				.storage_root(self.backend, self.storage_transaction_cache, state_version);
+		let (root, _cached) = self.overlay.storage_root(self.backend, state_version);
+
 		trace!(
 			target: "state",
 			method = "StorageRoot",
 			ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
 			storage_root = %HexDisplay::from(&root.as_ref()),
-			cached = false,
+			cached = %_cached,
 		);
+
 		root.encode()
 	}
 
@@ -563,74 +531,22 @@ where
 		state_version: StateVersion,
 	) -> Vec<u8> {
 		let _guard = guard();
-		let storage_key = child_info.storage_key();
-		let prefixed_storage_key = child_info.prefixed_storage_key();
-		if self.storage_transaction_cache.transaction_storage_root.is_some() {
-			let root = self
-				.storage(prefixed_storage_key.as_slice())
-				.and_then(|k| Decode::decode(&mut &k[..]).ok())
-				// V1 is equivalent to V0 on empty root.
-				.unwrap_or_else(empty_child_trie_root::<LayoutV1<H>>);
-			trace!(
-				target: "state",
-				method = "ChildStorageRoot",
-				ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
-				child_info = %HexDisplay::from(&storage_key),
-				storage_root = %HexDisplay::from(&root.as_ref()),
-				cached = true,
-			);
-			root.encode()
-		} else {
-			let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) {
-				let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref)));
-				Some(self.backend.child_storage_root(info, delta, state_version))
-			} else {
-				None
-			};
-
-			if let Some((root, is_empty, _)) = root {
-				let root = root.encode();
-				// We store update in the overlay in order to be able to use
-				// 'self.storage_transaction' cache. This is brittle as it rely on Ext only querying
-				// the trie backend for storage root.
-				// A better design would be to manage 'child_storage_transaction' in a
-				// similar way as 'storage_transaction' but for each child trie.
-				if is_empty {
-					self.overlay.set_storage(prefixed_storage_key.into_inner(), None);
-				} else {
-					self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone()));
-				}
 
-				trace!(
-					target: "state",
-					method = "ChildStorageRoot",
-					ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
-					child_info = %HexDisplay::from(&storage_key),
-					storage_root = %HexDisplay::from(&root.as_ref()),
-					cached = false,
-				);
+		let (root, _cached) = self
+			.overlay
+			.child_storage_root(child_info, self.backend, state_version)
+			.expect(EXT_NOT_ALLOWED_TO_FAIL);
 
-				root
-			} else {
-				// empty overlay
-				let root = self
-					.storage(prefixed_storage_key.as_slice())
-					.and_then(|k| Decode::decode(&mut &k[..]).ok())
-					// V1 is equivalent to V0 on empty root.
-					.unwrap_or_else(empty_child_trie_root::<LayoutV1<H>>);
-
-				trace!(
-					target: "state",
-					method = "ChildStorageRoot",
-					ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
-					child_info = %HexDisplay::from(&storage_key),
-					storage_root = %HexDisplay::from(&root.as_ref()),
-					cached = false,
-				);
+		trace!(
+			target: "state",
+			method = "ChildStorageRoot",
+			ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+			child_info = %HexDisplay::from(&child_info.storage_key()),
+			storage_root = %HexDisplay::from(&root.as_ref()),
+			cached = %_cached,
+		);
 
-				root.encode()
-			}
-		}
+		root.encode()
 	}
 
 	fn storage_index_transaction(&mut self, index: u32, hash: &[u8], size: u32) {
@@ -669,7 +585,6 @@ where
 	}
 
 	fn storage_rollback_transaction(&mut self) -> Result<(), ()> {
-		self.mark_dirty();
 		self.overlay.rollback_transaction().map_err(|_| ())
 	}
 
@@ -682,14 +597,9 @@ where
 			self.overlay.rollback_transaction().expect(BENCHMARKING_FN);
 		}
 		self.overlay
-			.drain_storage_changes(
-				self.backend,
-				self.storage_transaction_cache,
-				Default::default(), // using any state
-			)
+			.drain_storage_changes(self.backend, Default::default())
 			.expect(EXT_NOT_ALLOWED_TO_FAIL);
 		self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL);
-		self.mark_dirty();
 		self.overlay
 			.enter_runtime()
 			.expect("We have reset the overlay above, so we can not be in the runtime; qed");
@@ -703,7 +613,7 @@ where
 		}
 		let changes = self
 			.overlay
-			.drain_storage_changes(self.backend, self.storage_transaction_cache, state_version)
+			.drain_storage_changes(self.backend, state_version)
 			.expect(EXT_NOT_ALLOWED_TO_FAIL);
 		self.backend
 			.commit(
@@ -713,7 +623,6 @@ where
 				changes.child_storage_changes,
 			)
 			.expect(EXT_NOT_ALLOWED_TO_FAIL);
-		self.mark_dirty();
 		self.overlay
 			.enter_runtime()
 			.expect("We have reset the overlay above, so we can not be in the runtime; qed");
@@ -914,7 +823,7 @@ where
 mod tests {
 	use super::*;
 	use crate::InMemoryBackend;
-	use codec::Encode;
+	use codec::{Decode, Encode};
 	use sp_core::{
 		map,
 		storage::{Storage, StorageChild},
@@ -926,7 +835,6 @@ mod tests {
 
 	#[test]
 	fn next_storage_key_works() {
-		let mut cache = StorageTransactionCache::default();
 		let mut overlay = OverlayedChanges::default();
 		overlay.set_storage(vec![20], None);
 		overlay.set_storage(vec![30], Some(vec![31]));
@@ -943,7 +851,7 @@ mod tests {
 		)
 			.into();
 
-		let ext = TestExt::new(&mut overlay, &mut cache, &backend, None);
+		let ext = TestExt::new(&mut overlay, &backend, None);
 
 		// next_backend < next_overlay
 		assert_eq!(ext.next_storage_key(&[5]), Some(vec![10]));
@@ -959,7 +867,7 @@ mod tests {
 
 		drop(ext);
 		overlay.set_storage(vec![50], Some(vec![50]));
-		let ext = TestExt::new(&mut overlay, &mut cache, &backend, None);
+		let ext = TestExt::new(&mut overlay, &backend, None);
 
 		// next_overlay exist but next_backend doesn't exist
 		assert_eq!(ext.next_storage_key(&[40]), Some(vec![50]));
@@ -967,7 +875,6 @@ mod tests {
 
 	#[test]
 	fn next_storage_key_works_with_a_lot_empty_values_in_overlay() {
-		let mut cache = StorageTransactionCache::default();
 		let mut overlay = OverlayedChanges::default();
 		overlay.set_storage(vec![20], None);
 		overlay.set_storage(vec![21], None);
@@ -990,7 +897,7 @@ mod tests {
 		)
 			.into();
 
-		let ext = TestExt::new(&mut overlay, &mut cache, &backend, None);
+		let ext = TestExt::new(&mut overlay, &backend, None);
 
 		assert_eq!(ext.next_storage_key(&[5]), Some(vec![30]));
 
@@ -1002,7 +909,6 @@ mod tests {
 		let child_info = ChildInfo::new_default(b"Child1");
 		let child_info = &child_info;
 
-		let mut cache = StorageTransactionCache::default();
 		let mut overlay = OverlayedChanges::default();
 		overlay.set_child_storage(child_info, vec![20], None);
 		overlay.set_child_storage(child_info, vec![30], Some(vec![31]));
@@ -1024,7 +930,7 @@ mod tests {
 		)
 			.into();
 
-		let ext = TestExt::new(&mut overlay, &mut cache, &backend, None);
+		let ext = TestExt::new(&mut overlay, &backend, None);
 
 		// next_backend < next_overlay
 		assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10]));
@@ -1040,7 +946,7 @@ mod tests {
 
 		drop(ext);
 		overlay.set_child_storage(child_info, vec![50], Some(vec![50]));
-		let ext = TestExt::new(&mut overlay, &mut cache, &backend, None);
+		let ext = TestExt::new(&mut overlay, &backend, None);
 
 		// next_overlay exist but next_backend doesn't exist
 		assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50]));
@@ -1050,7 +956,6 @@ mod tests {
 	fn child_storage_works() {
 		let child_info = ChildInfo::new_default(b"Child1");
 		let child_info = &child_info;
-		let mut cache = StorageTransactionCache::default();
 		let mut overlay = OverlayedChanges::default();
 		overlay.set_child_storage(child_info, vec![20], None);
 		overlay.set_child_storage(child_info, vec![30], Some(vec![31]));
@@ -1072,7 +977,7 @@ mod tests {
 		)
 			.into();
 
-		let ext = TestExt::new(&mut overlay, &mut cache, &backend, None);
+		let ext = TestExt::new(&mut overlay, &backend, None);
 
 		assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10]));
 		assert_eq!(
@@ -1094,7 +999,6 @@ mod tests {
 	fn clear_prefix_cannot_delete_a_child_root() {
 		let child_info = ChildInfo::new_default(b"Child1");
 		let child_info = &child_info;
-		let mut cache = StorageTransactionCache::default();
 		let mut overlay = OverlayedChanges::default();
 		let backend = (
 			Storage {
@@ -1112,7 +1016,7 @@ mod tests {
 		)
 			.into();
 
-		let ext = TestExt::new(&mut overlay, &mut cache, &backend, None);
+		let ext = TestExt::new(&mut overlay, &backend, None);
 
 		use sp_core::storage::well_known_keys;
 		let mut ext = ext;
diff --git a/substrate/primitives/state-machine/src/in_memory_backend.rs b/substrate/primitives/state-machine/src/in_memory_backend.rs
index 2c3ed7441501cb1d52823247517b329e2abfd874..ce551cec2a473ceacbc1ab99984af2e5ed4b64f4 100644
--- a/substrate/primitives/state-machine/src/in_memory_backend.rs
+++ b/substrate/primitives/state-machine/src/in_memory_backend.rs
@@ -24,36 +24,22 @@ use crate::{
 use codec::Codec;
 use hash_db::Hasher;
 use sp_core::storage::{ChildInfo, StateVersion, Storage};
-use sp_trie::{empty_trie_root, GenericMemoryDB, HashKey, KeyFunction, LayoutV1, MemoryDB};
+use sp_trie::{empty_trie_root, LayoutV1, PrefixedMemoryDB};
 use std::collections::{BTreeMap, HashMap};
 
 /// Create a new empty instance of in-memory backend.
-///
-/// It will use [`HashKey`] to store the keys internally.
-pub fn new_in_mem_hash_key<H>() -> TrieBackend<MemoryDB<H>, H>
+pub fn new_in_mem<H>() -> TrieBackend<PrefixedMemoryDB<H>, H>
 where
 	H: Hasher,
 	H::Out: Codec + Ord,
 {
-	new_in_mem::<H, HashKey<H>>()
-}
-
-/// Create a new empty instance of in-memory backend.
-pub fn new_in_mem<H, KF>() -> TrieBackend<GenericMemoryDB<H, KF>, H>
-where
-	H: Hasher,
-	H::Out: Codec + Ord,
-	KF: KeyFunction<H> + Send + Sync,
-{
-	let db = GenericMemoryDB::default();
 	// V1 is same as V0 for an empty trie.
-	TrieBackendBuilder::new(db, empty_trie_root::<LayoutV1<H>>()).build()
+	TrieBackendBuilder::new(Default::default(), empty_trie_root::<LayoutV1<H>>()).build()
 }
 
-impl<H: Hasher, KF> TrieBackend<GenericMemoryDB<H, KF>, H>
+impl<H: Hasher> TrieBackend<PrefixedMemoryDB<H>, H>
 where
 	H::Out: Codec + Ord,
-	KF: KeyFunction<H> + Send + Sync,
 {
 	/// Copy the state, with applied updates
 	pub fn update<T: IntoIterator<Item = (Option<ChildInfo>, StorageCollection)>>(
@@ -85,15 +71,16 @@ where
 	}
 
 	/// Merge trie nodes into this backend.
-	pub fn update_backend(&self, root: H::Out, changes: GenericMemoryDB<H, KF>) -> Self {
+	pub fn update_backend(&self, root: H::Out, changes: PrefixedMemoryDB<H>) -> Self {
 		let mut clone = self.backend_storage().clone();
 		clone.consolidate(changes);
 		TrieBackendBuilder::new(clone, root).build()
 	}
 
 	/// Apply the given transaction to this backend and set the root to the given value.
-	pub fn apply_transaction(&mut self, root: H::Out, transaction: GenericMemoryDB<H, KF>) {
+	pub fn apply_transaction(&mut self, root: H::Out, transaction: PrefixedMemoryDB<H>) {
 		let mut storage = sp_std::mem::take(self).into_storage();
+
 		storage.consolidate(transaction);
 		*self = TrieBackendBuilder::new(storage, root).build();
 	}
@@ -104,33 +91,29 @@ where
 	}
 }
 
-impl<H: Hasher, KF> Clone for TrieBackend<GenericMemoryDB<H, KF>, H>
+impl<H: Hasher> Clone for TrieBackend<PrefixedMemoryDB<H>, H>
 where
 	H::Out: Codec + Ord,
-	KF: KeyFunction<H> + Send + Sync,
 {
 	fn clone(&self) -> Self {
 		TrieBackendBuilder::new(self.backend_storage().clone(), *self.root()).build()
 	}
 }
 
-impl<H, KF> Default for TrieBackend<GenericMemoryDB<H, KF>, H>
+impl<H> Default for TrieBackend<PrefixedMemoryDB<H>, H>
 where
 	H: Hasher,
 	H::Out: Codec + Ord,
-	KF: KeyFunction<H> + Send + Sync,
 {
 	fn default() -> Self {
 		new_in_mem()
 	}
 }
 
-impl<H: Hasher, KF>
-	From<(HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>>, StateVersion)>
-	for TrieBackend<GenericMemoryDB<H, KF>, H>
+impl<H: Hasher> From<(HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>>, StateVersion)>
+	for TrieBackend<PrefixedMemoryDB<H>, H>
 where
 	H::Out: Codec + Ord,
-	KF: KeyFunction<H> + Send + Sync,
 {
 	fn from(
 		(inner, state_version): (
@@ -149,10 +132,9 @@ where
 	}
 }
 
-impl<H: Hasher, KF> From<(Storage, StateVersion)> for TrieBackend<GenericMemoryDB<H, KF>, H>
+impl<H: Hasher> From<(Storage, StateVersion)> for TrieBackend<PrefixedMemoryDB<H>, H>
 where
 	H::Out: Codec + Ord,
-	KF: KeyFunction<H> + Send + Sync,
 {
 	fn from((inners, state_version): (Storage, StateVersion)) -> Self {
 		let mut inner: HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>> = inners
@@ -165,11 +147,10 @@ where
 	}
 }
 
-impl<H: Hasher, KF> From<(BTreeMap<StorageKey, StorageValue>, StateVersion)>
-	for TrieBackend<GenericMemoryDB<H, KF>, H>
+impl<H: Hasher> From<(BTreeMap<StorageKey, StorageValue>, StateVersion)>
+	for TrieBackend<PrefixedMemoryDB<H>, H>
 where
 	H::Out: Codec + Ord,
-	KF: KeyFunction<H> + Send + Sync,
 {
 	fn from((inner, state_version): (BTreeMap<StorageKey, StorageValue>, StateVersion)) -> Self {
 		let mut expanded = HashMap::new();
@@ -178,11 +159,10 @@ where
 	}
 }
 
-impl<H: Hasher, KF> From<(Vec<(Option<ChildInfo>, StorageCollection)>, StateVersion)>
-	for TrieBackend<GenericMemoryDB<H, KF>, H>
+impl<H: Hasher> From<(Vec<(Option<ChildInfo>, StorageCollection)>, StateVersion)>
+	for TrieBackend<PrefixedMemoryDB<H>, H>
 where
 	H::Out: Codec + Ord,
-	KF: KeyFunction<H> + Send + Sync,
 {
 	fn from(
 		(inner, state_version): (Vec<(Option<ChildInfo>, StorageCollection)>, StateVersion),
@@ -212,7 +192,7 @@ mod tests {
 	#[test]
 	fn in_memory_with_child_trie_only() {
 		let state_version = StateVersion::default();
-		let storage = new_in_mem_hash_key::<BlakeTwo256>();
+		let storage = new_in_mem::<BlakeTwo256>();
 		let child_info = ChildInfo::new_default(b"1");
 		let child_info = &child_info;
 		let storage = storage.update(
@@ -228,7 +208,7 @@ mod tests {
 	#[test]
 	fn insert_multiple_times_child_data_works() {
 		let state_version = StateVersion::default();
-		let mut storage = new_in_mem_hash_key::<BlakeTwo256>();
+		let mut storage = new_in_mem::<BlakeTwo256>();
 		let child_info = ChildInfo::new_default(b"1");
 
 		storage.insert(
diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs
index 3ef923851ffd895515722cc03daf2f9b6ce9a7f1..0e2b9bfdfffcf01ae06933d5e02ff6842a80cc77 100644
--- a/substrate/primitives/state-machine/src/lib.rs
+++ b/substrate/primitives/state-machine/src/lib.rs
@@ -125,13 +125,13 @@ impl sp_std::fmt::Display for DefaultError {
 }
 
 pub use crate::{
-	backend::{Backend, IterArgs, KeysIter, PairsIter, StorageIterator},
+	backend::{Backend, BackendTransaction, IterArgs, KeysIter, PairsIter, StorageIterator},
 	error::{Error, ExecutionError},
 	ext::Ext,
 	overlayed_changes::{
 		ChildStorageCollection, IndexOperation, OffchainChangesCollection,
 		OffchainOverlayedChanges, OverlayedChanges, StorageChanges, StorageCollection, StorageKey,
-		StorageTransactionCache, StorageValue,
+		StorageValue,
 	},
 	stats::{StateMachineStats, UsageInfo, UsageUnit},
 	trie_backend::{TrieBackend, TrieBackendBuilder},
@@ -143,7 +143,7 @@ mod std_reexport {
 	pub use crate::{
 		basic::BasicExternalities,
 		error::{Error, ExecutionError},
-		in_memory_backend::{new_in_mem, new_in_mem_hash_key},
+		in_memory_backend::new_in_mem,
 		read_only::{InspectState, ReadOnlyExternalities},
 		testing::TestExternalities,
 		trie_backend::create_proof_check_backend,
@@ -168,6 +168,7 @@ mod execution {
 		traits::{CallContext, CodeExecutor, RuntimeCode},
 	};
 	use sp_externalities::Extensions;
+	use sp_trie::PrefixedMemoryDB;
 	use std::collections::{HashMap, HashSet};
 
 	pub(crate) type CallResult<E> = Result<Vec<u8>, E>;
@@ -176,7 +177,7 @@ mod execution {
 	pub type DefaultHandler<E> = fn(CallResult<E>, CallResult<E>) -> CallResult<E>;
 
 	/// Trie backend with in-memory storage.
-	pub type InMemoryBackend<H> = TrieBackend<MemoryDB<H>, H>;
+	pub type InMemoryBackend<H> = TrieBackend<PrefixedMemoryDB<H>, H>;
 
 	/// Storage backend trust level.
 	#[derive(Debug, Clone)]
@@ -199,9 +200,8 @@ mod execution {
 		exec: &'a Exec,
 		method: &'a str,
 		call_data: &'a [u8],
-		overlay: &'a mut OverlayedChanges,
+		overlay: &'a mut OverlayedChanges<H>,
 		extensions: &'a mut Extensions,
-		storage_transaction_cache: Option<&'a mut StorageTransactionCache<B::Transaction, H>>,
 		runtime_code: &'a RuntimeCode<'a>,
 		stats: StateMachineStats,
 		/// The hash of the block the state machine will be executed on.
@@ -231,7 +231,7 @@ mod execution {
 		/// Creates new substrate state machine.
 		pub fn new(
 			backend: &'a B,
-			overlay: &'a mut OverlayedChanges,
+			overlay: &'a mut OverlayedChanges<H>,
 			exec: &'a Exec,
 			method: &'a str,
 			call_data: &'a [u8],
@@ -246,7 +246,6 @@ mod execution {
 				call_data,
 				extensions,
 				overlay,
-				storage_transaction_cache: None,
 				runtime_code,
 				stats: StateMachineStats::default(),
 				parent_hash: None,
@@ -254,19 +253,6 @@ mod execution {
 			}
 		}
 
-		/// Use given `cache` as storage transaction cache.
-		///
-		/// The cache will be used to cache storage transactions that can be build while executing a
-		/// function in the runtime. For example, when calculating the storage root a transaction is
-		/// build that will be cached.
-		pub fn with_storage_transaction_cache(
-			mut self,
-			cache: Option<&'a mut StorageTransactionCache<B::Transaction, H>>,
-		) -> Self {
-			self.storage_transaction_cache = cache;
-			self
-		}
-
 		/// Set the given `parent_hash` as the hash of the parent block.
 		///
 		/// This will be used for improved logging.
@@ -284,18 +270,11 @@ mod execution {
 		///
 		/// Returns the SCALE encoded result of the executed function.
 		pub fn execute(&mut self) -> Result<Vec<u8>, Box<dyn Error>> {
-			let mut cache = StorageTransactionCache::default();
-
-			let cache = match self.storage_transaction_cache.as_mut() {
-				Some(cache) => cache,
-				None => &mut cache,
-			};
-
 			self.overlay
 				.enter_runtime()
 				.expect("StateMachine is never called from the runtime; qed");
 
-			let mut ext = Ext::new(self.overlay, cache, self.backend, Some(self.extensions));
+			let mut ext = Ext::new(self.overlay, self.backend, Some(self.extensions));
 
 			let ext_id = ext.id;
 
@@ -331,7 +310,7 @@ mod execution {
 	/// Prove execution using the given state backend, overlayed changes, and call executor.
 	pub fn prove_execution<B, H, Exec>(
 		backend: &mut B,
-		overlay: &mut OverlayedChanges,
+		overlay: &mut OverlayedChanges<H>,
 		exec: &Exec,
 		method: &str,
 		call_data: &[u8],
@@ -366,7 +345,7 @@ mod execution {
 	/// blocks (e.g. a transaction at a time), ensure a different method is used.
 	pub fn prove_execution_on_trie_backend<S, H, Exec>(
 		trie_backend: &TrieBackend<S, H>,
-		overlay: &mut OverlayedChanges,
+		overlay: &mut OverlayedChanges<H>,
 		exec: &Exec,
 		method: &str,
 		call_data: &[u8],
@@ -405,7 +384,7 @@ mod execution {
 	pub fn execution_proof_check<H, Exec>(
 		root: H::Out,
 		proof: StorageProof,
-		overlay: &mut OverlayedChanges,
+		overlay: &mut OverlayedChanges<H>,
 		exec: &Exec,
 		method: &str,
 		call_data: &[u8],
@@ -430,7 +409,7 @@ mod execution {
 	/// Check execution proof on proving backend, generated by `prove_execution` call.
 	pub fn execution_proof_check_on_trie_backend<H, Exec>(
 		trie_backend: &TrieBackend<MemoryDB<H>, H>,
-		overlay: &mut OverlayedChanges,
+		overlay: &mut OverlayedChanges<H>,
 		exec: &Exec,
 		method: &str,
 		call_data: &[u8],
@@ -1109,7 +1088,7 @@ mod execution {
 #[cfg(test)]
 mod tests {
 	use super::{backend::AsTrieBackend, ext::Ext, *};
-	use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key};
+	use crate::{execution::CallResult, in_memory_backend::new_in_mem};
 	use assert_matches::assert_matches;
 	use codec::Encode;
 	use sp_core::{
@@ -1287,8 +1266,7 @@ mod tests {
 
 		let overlay_limit = overlay.clone();
 		{
-			let mut cache = StorageTransactionCache::default();
-			let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let mut ext = Ext::new(&mut overlay, backend, None);
 			let _ = ext.clear_prefix(b"ab", None, None);
 		}
 		overlay.commit_transaction().unwrap();
@@ -1311,8 +1289,7 @@ mod tests {
 
 		let mut overlay = overlay_limit;
 		{
-			let mut cache = StorageTransactionCache::default();
-			let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let mut ext = Ext::new(&mut overlay, backend, None);
 			assert_matches!(
 				ext.clear_prefix(b"ab", Some(1), None).deconstruct(),
 				(Some(_), 1, 3, 1)
@@ -1356,8 +1333,7 @@ mod tests {
 		overlay.set_child_storage(&child_info, b"4".to_vec(), Some(b"1312".to_vec()));
 
 		{
-			let mut cache = StorageTransactionCache::default();
-			let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None);
+			let mut ext = Ext::new(&mut overlay, &backend, None);
 			let r = ext.kill_child_storage(&child_info, Some(2), None);
 			assert_matches!(r.deconstruct(), (Some(_), 2, 6, 2));
 		}
@@ -1392,8 +1368,7 @@ mod tests {
 		];
 		let backend = InMemoryBackend::<BlakeTwo256>::from((initial, StateVersion::default()));
 		let mut overlay = OverlayedChanges::default();
-		let mut cache = StorageTransactionCache::default();
-		let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None);
+		let mut ext = Ext::new(&mut overlay, &backend, None);
 		let r = ext.kill_child_storage(&child_info, Some(0), None).deconstruct();
 		assert_matches!(r, (Some(_), 0, 0, 0));
 		let r = ext
@@ -1422,8 +1397,7 @@ mod tests {
 		];
 		let backend = InMemoryBackend::<BlakeTwo256>::from((initial, StateVersion::default()));
 		let mut overlay = OverlayedChanges::default();
-		let mut cache = StorageTransactionCache::default();
-		let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None);
+		let mut ext = Ext::new(&mut overlay, &backend, None);
 		assert_eq!(ext.kill_child_storage(&child_info, None, None).deconstruct(), (None, 4, 4, 4));
 	}
 
@@ -1431,11 +1405,10 @@ mod tests {
 	fn set_child_storage_works() {
 		let child_info = ChildInfo::new_default(b"sub1");
 		let child_info = &child_info;
-		let state = new_in_mem_hash_key::<BlakeTwo256>();
+		let state = new_in_mem::<BlakeTwo256>();
 		let backend = state.as_trie_backend();
 		let mut overlay = OverlayedChanges::default();
-		let mut cache = StorageTransactionCache::default();
-		let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+		let mut ext = Ext::new(&mut overlay, backend, None);
 
 		ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec());
 		assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec()));
@@ -1447,19 +1420,18 @@ mod tests {
 	fn append_storage_works() {
 		let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()];
 		let key = b"key".to_vec();
-		let state = new_in_mem_hash_key::<BlakeTwo256>();
+		let state = new_in_mem::<BlakeTwo256>();
 		let backend = state.as_trie_backend();
 		let mut overlay = OverlayedChanges::default();
-		let mut cache = StorageTransactionCache::default();
 		{
-			let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let mut ext = Ext::new(&mut overlay, backend, None);
 
 			ext.storage_append(key.clone(), reference_data[0].encode());
 			assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode()));
 		}
 		overlay.start_transaction();
 		{
-			let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let mut ext = Ext::new(&mut overlay, backend, None);
 
 			for i in reference_data.iter().skip(1) {
 				ext.storage_append(key.clone(), i.encode());
@@ -1468,7 +1440,7 @@ mod tests {
 		}
 		overlay.rollback_transaction().unwrap();
 		{
-			let ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let ext = Ext::new(&mut overlay, backend, None);
 			assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode()));
 		}
 	}
@@ -1483,14 +1455,13 @@ mod tests {
 		}
 
 		let key = b"events".to_vec();
-		let mut cache = StorageTransactionCache::default();
-		let state = new_in_mem_hash_key::<BlakeTwo256>();
+		let state = new_in_mem::<BlakeTwo256>();
 		let backend = state.as_trie_backend();
 		let mut overlay = OverlayedChanges::default();
 
 		// For example, block initialization with event.
 		{
-			let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let mut ext = Ext::new(&mut overlay, backend, None);
 			ext.clear_storage(key.as_slice());
 			ext.storage_append(key.clone(), Item::InitializationItem.encode());
 		}
@@ -1498,7 +1469,7 @@ mod tests {
 
 		// For example, first transaction resulted in panic during block building
 		{
-			let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let mut ext = Ext::new(&mut overlay, backend, None);
 
 			assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode()));
 
@@ -1513,7 +1484,7 @@ mod tests {
 
 		// Then we apply next transaction which is valid this time.
 		{
-			let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let mut ext = Ext::new(&mut overlay, backend, None);
 
 			assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode()));
 
@@ -1528,7 +1499,7 @@ mod tests {
 
 		// Then only initlaization item and second (committed) item should persist.
 		{
-			let ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let ext = Ext::new(&mut overlay, backend, None);
 			assert_eq!(
 				ext.storage(key.as_slice()),
 				Some(vec![Item::InitializationItem, Item::CommitedItem].encode()),
@@ -1945,12 +1916,11 @@ mod tests {
 
 		let mut transaction = {
 			let backend = test_trie(state_version, None, None);
-			let mut cache = StorageTransactionCache::default();
-			let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None);
+			let mut ext = Ext::new(&mut overlay, &backend, None);
 			ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec());
 			ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec());
 			ext.storage_root(state_version);
-			cache.transaction.unwrap()
+			overlay.drain_storage_changes(&backend, state_version).unwrap().transaction
 		};
 		let mut duplicate = false;
 		for (k, (value, rc)) in transaction.drain().iter() {
@@ -1982,8 +1952,7 @@ mod tests {
 		assert_eq!(overlay.storage(b"bbb"), None);
 
 		{
-			let mut cache = StorageTransactionCache::default();
-			let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
+			let mut ext = Ext::new(&mut overlay, backend, None);
 			assert_eq!(ext.storage(b"bbb"), Some(vec![]));
 			assert_eq!(ext.storage(b"ccc"), Some(vec![]));
 			ext.clear_storage(b"ccc");
diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs
index b32df635b177c8c8a8e9da45f82dd7f7f390c1a8..28cfecf1dbd62b5387f79cff8938fc6fc9d4bf16 100644
--- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs
+++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs
@@ -21,7 +21,7 @@ mod changeset;
 mod offchain;
 
 use self::changeset::OverlayedChangeSet;
-use crate::{backend::Backend, stats::StateMachineStats, DefaultError};
+use crate::{backend::Backend, stats::StateMachineStats, BackendTransaction, DefaultError};
 use codec::{Decode, Encode};
 use hash_db::Hasher;
 pub use offchain::OffchainOverlayedChanges;
@@ -34,6 +34,7 @@ use sp_externalities::{Extension, Extensions};
 #[cfg(not(feature = "std"))]
 use sp_std::collections::btree_map::BTreeMap as Map;
 use sp_std::{collections::btree_set::BTreeSet, vec::Vec};
+use sp_trie::{empty_child_trie_root, LayoutV1};
 #[cfg(feature = "std")]
 use std::collections::{hash_map::Entry as MapEntry, HashMap as Map};
 #[cfg(feature = "std")]
@@ -88,8 +89,7 @@ impl Extrinsics {
 /// The set of changes that are overlaid onto the backend.
 ///
 /// It allows changes to be modified using nestable transactions.
-#[derive(Debug, Default, Clone)]
-pub struct OverlayedChanges {
+pub struct OverlayedChanges<H: Hasher> {
 	/// Top level storage changes.
 	top: OverlayedChangeSet,
 	/// Child storage changes. The map key is the child storage key without the common prefix.
@@ -102,6 +102,52 @@ pub struct OverlayedChanges {
 	collect_extrinsics: bool,
 	/// Collect statistic on this execution.
 	stats: StateMachineStats,
+	/// Caches the "storage transaction" that is created while calling `storage_root`.
+	///
+	/// This transaction can be applied to the backend to persist the state changes.
+	storage_transaction_cache: Option<StorageTransactionCache<H>>,
+}
+
+impl<H: Hasher> Default for OverlayedChanges<H> {
+	fn default() -> Self {
+		Self {
+			top: Default::default(),
+			children: Default::default(),
+			offchain: Default::default(),
+			transaction_index_ops: Default::default(),
+			collect_extrinsics: Default::default(),
+			stats: Default::default(),
+			storage_transaction_cache: None,
+		}
+	}
+}
+
+impl<H: Hasher> Clone for OverlayedChanges<H> {
+	fn clone(&self) -> Self {
+		Self {
+			top: self.top.clone(),
+			children: self.children.clone(),
+			offchain: self.offchain.clone(),
+			transaction_index_ops: self.transaction_index_ops.clone(),
+			collect_extrinsics: self.collect_extrinsics,
+			stats: self.stats.clone(),
+			storage_transaction_cache: self.storage_transaction_cache.clone(),
+		}
+	}
+}
+
+impl<H: Hasher> sp_std::fmt::Debug for OverlayedChanges<H> {
+	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+		f.debug_struct("OverlayedChanges")
+			.field("top", &self.top)
+			.field("children", &self.children)
+			.field("offchain", &self.offchain)
+			.field("transaction_index_ops", &self.transaction_index_ops)
+			.field("collect_extrinsics", &self.collect_extrinsics)
+			.field("stats", &self.stats)
+			.field("storage_transaction_cache", &self.storage_transaction_cache)
+			.finish()
+	}
 }
 
 /// Transaction index operation.
@@ -129,7 +175,7 @@ pub enum IndexOperation {
 ///
 /// This contains all the changes to the storage and transactions to apply theses changes to the
 /// backend.
-pub struct StorageChanges<Transaction, H: Hasher> {
+pub struct StorageChanges<H: Hasher> {
 	/// All changes to the main storage.
 	///
 	/// A value of `None` means that it was deleted.
@@ -142,7 +188,7 @@ pub struct StorageChanges<Transaction, H: Hasher> {
 	/// [`main_storage_changes`](StorageChanges::main_storage_changes) and from
 	/// [`child_storage_changes`](StorageChanges::child_storage_changes).
 	/// [`offchain_storage_changes`](StorageChanges::offchain_storage_changes).
-	pub transaction: Transaction,
+	pub transaction: BackendTransaction<H>,
 	/// The storage root after applying the transaction.
 	pub transaction_storage_root: H::Out,
 	/// Changes to the transaction index,
@@ -151,7 +197,7 @@ pub struct StorageChanges<Transaction, H: Hasher> {
 }
 
 #[cfg(feature = "std")]
-impl<Transaction, H: Hasher> StorageChanges<Transaction, H> {
+impl<H: Hasher> StorageChanges<H> {
 	/// Deconstruct into the inner values
 	pub fn into_inner(
 		self,
@@ -159,7 +205,7 @@ impl<Transaction, H: Hasher> StorageChanges<Transaction, H> {
 		StorageCollection,
 		ChildStorageCollection,
 		OffchainChangesCollection,
-		Transaction,
+		BackendTransaction<H>,
 		H::Out,
 		Vec<IndexOperation>,
 	) {
@@ -174,44 +220,60 @@ impl<Transaction, H: Hasher> StorageChanges<Transaction, H> {
 	}
 }
 
+impl<H: Hasher> Default for StorageChanges<H> {
+	fn default() -> Self {
+		Self {
+			main_storage_changes: Default::default(),
+			child_storage_changes: Default::default(),
+			offchain_storage_changes: Default::default(),
+			transaction: Default::default(),
+			transaction_storage_root: Default::default(),
+			#[cfg(feature = "std")]
+			transaction_index_changes: Default::default(),
+		}
+	}
+}
+
 /// Storage transactions are calculated as part of the `storage_root`.
 /// These transactions can be reused for importing the block into the
 /// storage. So, we cache them to not require a recomputation of those transactions.
-pub struct StorageTransactionCache<Transaction, H: Hasher> {
+struct StorageTransactionCache<H: Hasher> {
 	/// Contains the changes for the main and the child storages as one transaction.
-	pub(crate) transaction: Option<Transaction>,
+	transaction: BackendTransaction<H>,
 	/// The storage root after applying the transaction.
-	pub(crate) transaction_storage_root: Option<H::Out>,
+	transaction_storage_root: H::Out,
 }
 
-impl<Transaction, H: Hasher> StorageTransactionCache<Transaction, H> {
-	/// Reset the cached transactions.
-	pub fn reset(&mut self) {
-		*self = Self::default();
+impl<H: Hasher> StorageTransactionCache<H> {
+	fn into_inner(self) -> (BackendTransaction<H>, H::Out) {
+		(self.transaction, self.transaction_storage_root)
 	}
 }
 
-impl<Transaction, H: Hasher> Default for StorageTransactionCache<Transaction, H> {
-	fn default() -> Self {
-		Self { transaction: None, transaction_storage_root: None }
+impl<H: Hasher> Clone for StorageTransactionCache<H> {
+	fn clone(&self) -> Self {
+		Self {
+			transaction: self.transaction.clone(),
+			transaction_storage_root: self.transaction_storage_root,
+		}
 	}
 }
 
-impl<Transaction: Default, H: Hasher> Default for StorageChanges<Transaction, H> {
-	fn default() -> Self {
-		Self {
-			main_storage_changes: Default::default(),
-			child_storage_changes: Default::default(),
-			offchain_storage_changes: Default::default(),
-			transaction: Default::default(),
-			transaction_storage_root: Default::default(),
-			#[cfg(feature = "std")]
-			transaction_index_changes: Default::default(),
-		}
+impl<H: Hasher> sp_std::fmt::Debug for StorageTransactionCache<H> {
+	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+		let mut debug = f.debug_struct("StorageTransactionCache");
+
+		#[cfg(feature = "std")]
+		debug.field("transaction_storage_root", &self.transaction_storage_root);
+
+		#[cfg(not(feature = "std"))]
+		debug.field("transaction_storage_root", &self.transaction_storage_root.as_ref());
+
+		debug.finish()
 	}
 }
 
-impl OverlayedChanges {
+impl<H: Hasher> OverlayedChanges<H> {
 	/// Whether no changes are contained in the top nor in any of the child changes.
 	pub fn is_empty(&self) -> bool {
 		self.top.is_empty() && self.children.is_empty()
@@ -234,6 +296,12 @@ impl OverlayedChanges {
 		})
 	}
 
+	/// Should be called when there are changes that require to reset the
+	/// `storage_transaction_cache`.
+	fn mark_dirty(&mut self) {
+		self.storage_transaction_cache = None;
+	}
+
 	/// Returns mutable reference to current value.
 	/// If there is no value in the overlay, the given callback is used to initiate the value.
 	/// Warning this function registers a change, so the mutable reference MUST be modified.
@@ -245,6 +313,8 @@ impl OverlayedChanges {
 		key: &[u8],
 		init: impl Fn() -> StorageValue,
 	) -> &mut StorageValue {
+		self.mark_dirty();
+
 		let value = self.top.modify(key.to_vec(), init, self.extrinsic_index());
 
 		// if the value was deleted initialise it back with an empty vec
@@ -266,6 +336,8 @@ impl OverlayedChanges {
 	///
 	/// Can be rolled back or committed when called inside a transaction.
 	pub fn set_storage(&mut self, key: StorageKey, val: Option<StorageValue>) {
+		self.mark_dirty();
+
 		let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0);
 		self.stats.tally_write_overlay(size_write);
 		self.top.set(key, val, self.extrinsic_index());
@@ -282,6 +354,8 @@ impl OverlayedChanges {
 		key: StorageKey,
 		val: Option<StorageValue>,
 	) {
+		self.mark_dirty();
+
 		let extrinsic_index = self.extrinsic_index();
 		let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0);
 		self.stats.tally_write_overlay(size_write);
@@ -300,6 +374,8 @@ impl OverlayedChanges {
 	///
 	/// Can be rolled back or committed when called inside a transaction.
 	pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) -> u32 {
+		self.mark_dirty();
+
 		let extrinsic_index = self.extrinsic_index();
 		let storage_key = child_info.storage_key().to_vec();
 		let top = &self.top;
@@ -316,6 +392,8 @@ impl OverlayedChanges {
 	///
 	/// Can be rolled back or committed when called inside a transaction.
 	pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) -> u32 {
+		self.mark_dirty();
+
 		self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index())
 	}
 
@@ -323,6 +401,8 @@ impl OverlayedChanges {
 	///
 	/// Can be rolled back or committed when called inside a transaction
 	pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) -> u32 {
+		self.mark_dirty();
+
 		let extrinsic_index = self.extrinsic_index();
 		let storage_key = child_info.storage_key().to_vec();
 		let top = &self.top;
@@ -364,6 +444,8 @@ impl OverlayedChanges {
 	/// Any changes made during that transaction are discarded. Returns an error if
 	/// there is no open transaction that can be rolled back.
 	pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> {
+		self.mark_dirty();
+
 		self.top.rollback_transaction()?;
 		retain_map(&mut self.children, |_, (changeset, _)| {
 			changeset
@@ -432,32 +514,6 @@ impl OverlayedChanges {
 		Ok(())
 	}
 
-	/// Consume all changes (top + children) and return them.
-	///
-	/// After calling this function no more changes are contained in this changeset.
-	///
-	/// Panics:
-	/// Panics if `transaction_depth() > 0`
-	fn drain_committed(
-		&mut self,
-	) -> (
-		impl Iterator<Item = (StorageKey, Option<StorageValue>)>,
-		impl Iterator<
-			Item = (
-				StorageKey,
-				(impl Iterator<Item = (StorageKey, Option<StorageValue>)>, ChildInfo),
-			),
-		>,
-	) {
-		use sp_std::mem::take;
-		(
-			take(&mut self.top).drain_commited(),
-			take(&mut self.children)
-				.into_iter()
-				.map(|(key, (val, info))| (key, (val.drain_commited(), info))),
-		)
-	}
-
 	/// Consume all changes (top + children) and return them.
 	///
 	/// After calling this function no more changes are contained in this changeset.
@@ -495,42 +551,33 @@ impl OverlayedChanges {
 		&self.transaction_index_ops
 	}
 
-	/// Convert this instance with all changes into a [`StorageChanges`] instance.
-	#[cfg(feature = "std")]
-	pub fn into_storage_changes<B: Backend<H>, H: Hasher>(
-		mut self,
-		backend: &B,
-		mut cache: StorageTransactionCache<B::Transaction, H>,
-		state_version: StateVersion,
-	) -> Result<StorageChanges<B::Transaction, H>, DefaultError>
-	where
-		H::Out: Ord + Encode + 'static,
-	{
-		self.drain_storage_changes(backend, &mut cache, state_version)
-	}
-
 	/// Drain all changes into a [`StorageChanges`] instance. Leave empty overlay in place.
-	pub fn drain_storage_changes<B: Backend<H>, H: Hasher>(
+	pub fn drain_storage_changes<B: Backend<H>>(
 		&mut self,
 		backend: &B,
-		cache: &mut StorageTransactionCache<B::Transaction, H>,
 		state_version: StateVersion,
-	) -> Result<StorageChanges<B::Transaction, H>, DefaultError>
+	) -> Result<StorageChanges<H>, DefaultError>
 	where
 		H::Out: Ord + Encode + 'static,
 	{
-		// If the transaction does not exist, we generate it.
-		if cache.transaction.is_none() {
-			self.storage_root(backend, cache, state_version);
-		}
+		let (transaction, transaction_storage_root) = match self.storage_transaction_cache.take() {
+			Some(cache) => cache.into_inner(),
+			// If the transaction does not exist, we generate it.
+			None => {
+				self.storage_root(backend, state_version);
+				self.storage_transaction_cache
+					.take()
+					.expect("`storage_transaction_cache` was just initialized; qed")
+					.into_inner()
+			},
+		};
 
-		let (transaction, transaction_storage_root) = cache
-			.transaction
-			.take()
-			.and_then(|t| cache.transaction_storage_root.take().map(|tr| (t, tr)))
-			.expect("Transaction was be generated as part of `storage_root`; qed");
+		use sp_std::mem::take;
+		let main_storage_changes = take(&mut self.top).drain_commited();
+		let child_storage_changes = take(&mut self.children)
+			.into_iter()
+			.map(|(key, (val, info))| (key, (val.drain_commited(), info)));
 
-		let (main_storage_changes, child_storage_changes) = self.drain_committed();
 		let offchain_storage_changes = self.offchain_drain_committed().collect();
 
 		#[cfg(feature = "std")]
@@ -562,29 +609,29 @@ impl OverlayedChanges {
 	/// Changes that are made outside of extrinsics, are marked with
 	/// `NO_EXTRINSIC_INDEX` index.
 	fn extrinsic_index(&self) -> Option<u32> {
-		match self.collect_extrinsics {
-			true => Some(
-				self.storage(EXTRINSIC_INDEX)
-					.and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok()))
-					.unwrap_or(NO_EXTRINSIC_INDEX),
-			),
-			false => None,
-		}
+		self.collect_extrinsics.then(|| {
+			self.storage(EXTRINSIC_INDEX)
+				.and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok()))
+				.unwrap_or(NO_EXTRINSIC_INDEX)
+		})
 	}
 
 	/// Generate the storage root using `backend` and all changes
 	/// as seen by the current transaction.
 	///
-	/// Returns the storage root and caches storage transaction in the given `cache`.
-	pub fn storage_root<H: Hasher, B: Backend<H>>(
-		&self,
+	/// Returns the storage root and whether it was already cached.
+	pub fn storage_root<B: Backend<H>>(
+		&mut self,
 		backend: &B,
-		cache: &mut StorageTransactionCache<B::Transaction, H>,
 		state_version: StateVersion,
-	) -> H::Out
+	) -> (H::Out, bool)
 	where
 		H::Out: Ord + Encode,
 	{
+		if let Some(cache) = &self.storage_transaction_cache {
+			return (cache.transaction_storage_root, true)
+		}
+
 		let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..])));
 		let child_delta = self.children().map(|(changes, info)| {
 			(info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))))
@@ -592,10 +639,72 @@ impl OverlayedChanges {
 
 		let (root, transaction) = backend.full_storage_root(delta, child_delta, state_version);
 
-		cache.transaction = Some(transaction);
-		cache.transaction_storage_root = Some(root);
+		self.storage_transaction_cache =
+			Some(StorageTransactionCache { transaction, transaction_storage_root: root });
+
+		(root, false)
+	}
+
+	/// Generate the child storage root using `backend` and all child changes
+	/// as seen by the current transaction.
+	///
+	/// Returns the child storage root and whether it was already cached.
+	pub fn child_storage_root<B: Backend<H>>(
+		&mut self,
+		child_info: &ChildInfo,
+		backend: &B,
+		state_version: StateVersion,
+	) -> Result<(H::Out, bool), B::Error>
+	where
+		H::Out: Ord + Encode + Decode,
+	{
+		let storage_key = child_info.storage_key();
+		let prefixed_storage_key = child_info.prefixed_storage_key();
+
+		if self.storage_transaction_cache.is_some() {
+			let root = self
+				.storage(prefixed_storage_key.as_slice())
+				.map(|v| Ok(v.map(|v| v.to_vec())))
+				.or_else(|| backend.storage(prefixed_storage_key.as_slice()).map(Some).transpose())
+				.transpose()?
+				.flatten()
+				.and_then(|k| Decode::decode(&mut &k[..]).ok())
+				// V1 is equivalent to V0 on empty root.
+				.unwrap_or_else(empty_child_trie_root::<LayoutV1<H>>);
+
+			return Ok((root, true))
+		}
 
-		root
+		let root = if let Some((changes, info)) = self.child_changes(storage_key) {
+			let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref)));
+			Some(backend.child_storage_root(info, delta, state_version))
+		} else {
+			None
+		};
+
+		let root = if let Some((root, is_empty, _)) = root {
+			// We store update in the overlay in order to be able to use
+			// 'self.storage_transaction' cache. This is brittle as it rely on Ext only querying
+			// the trie backend for storage root.
+			// A better design would be to manage 'child_storage_transaction' in a
+			// similar way as 'storage_transaction' but for each child trie.
+			self.set_storage(prefixed_storage_key.into_inner(), (!is_empty).then(|| root.encode()));
+
+			self.mark_dirty();
+
+			root
+		} else {
+			// empty overlay
+			let root = backend
+				.storage(prefixed_storage_key.as_slice())?
+				.and_then(|k| Decode::decode(&mut &k[..]).ok())
+				// V1 is equivalent to V0 on empty root.
+				.unwrap_or_else(empty_child_trie_root::<LayoutV1<H>>);
+
+			root
+		};
+
+		Ok((root, false))
 	}
 
 	/// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`)
@@ -639,7 +748,7 @@ impl OverlayedChanges {
 }
 
 #[cfg(feature = "std")]
-impl From<sp_core::storage::Storage> for OverlayedChanges {
+impl<H: Hasher> From<sp_core::storage::Storage> for OverlayedChanges<H> {
 	fn from(storage: sp_core::storage::Storage) -> Self {
 		Self {
 			top: storage.top.into(),
@@ -742,7 +851,8 @@ impl<'a> OverlayedExtensions<'a> {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::{ext::Ext, InMemoryBackend};
+	use crate::{ext::Ext, new_in_mem, InMemoryBackend};
+	use array_bytes::bytes2hex;
 	use sp_core::{traits::Externalities, Blake2Hasher};
 	use std::collections::BTreeMap;
 
@@ -755,7 +865,7 @@ mod tests {
 
 	#[test]
 	fn overlayed_storage_works() {
-		let mut overlayed = OverlayedChanges::default();
+		let mut overlayed = OverlayedChanges::<Blake2Hasher>::default();
 
 		let key = vec![42, 69, 169, 142];
 
@@ -790,7 +900,7 @@ mod tests {
 	fn offchain_overlayed_storage_transactions_works() {
 		use sp_core::offchain::STORAGE_PREFIX;
 		fn check_offchain_content(
-			state: &OverlayedChanges,
+			state: &OverlayedChanges<Blake2Hasher>,
 			nb_commit: usize,
 			expected: Vec<(Vec<u8>, Option<Vec<u8>>)>,
 		) {
@@ -867,18 +977,61 @@ mod tests {
 		overlay.set_storage(b"dogglesworth".to_vec(), Some(b"cat".to_vec()));
 		overlay.set_storage(b"doug".to_vec(), None);
 
-		let mut cache = StorageTransactionCache::default();
-		let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None);
-		let root = array_bytes::hex2bytes_unchecked(
-			"39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa",
-		);
+		{
+			let mut ext = Ext::new(&mut overlay, &backend, None);
+			let root = "39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa";
+
+			assert_eq!(bytes2hex("", &ext.storage_root(state_version)), root);
+			// Calling a second time should use it from the cache
+			assert_eq!(bytes2hex("", &ext.storage_root(state_version)), root);
+		}
+
+		// Check that the storage root is recalculated
+		overlay.set_storage(b"doug2".to_vec(), Some(b"yes".to_vec()));
 
-		assert_eq!(&ext.storage_root(state_version)[..], &root);
+		let mut ext = Ext::new(&mut overlay, &backend, None);
+		let root = "5c0a4e35cb967de785e1cb8743e6f24b6ff6d45155317f2078f6eb3fc4ff3e3d";
+		assert_eq!(bytes2hex("", &ext.storage_root(state_version)), root);
+	}
+
+	#[test]
+	fn overlayed_child_storage_root_works() {
+		let state_version = StateVersion::default();
+		let child_info = ChildInfo::new_default(b"Child1");
+		let child_info = &child_info;
+		let backend = new_in_mem::<Blake2Hasher>();
+		let mut overlay = OverlayedChanges::<Blake2Hasher>::default();
+		overlay.start_transaction();
+		overlay.set_child_storage(child_info, vec![20], Some(vec![20]));
+		overlay.set_child_storage(child_info, vec![30], Some(vec![30]));
+		overlay.set_child_storage(child_info, vec![40], Some(vec![40]));
+		overlay.commit_transaction().unwrap();
+		overlay.set_child_storage(child_info, vec![10], Some(vec![10]));
+		overlay.set_child_storage(child_info, vec![30], None);
+
+		{
+			let mut ext = Ext::new(&mut overlay, &backend, None);
+			let child_root = "c02965e1df4dc5baf6977390ce67dab1d7a9b27a87c1afe27b50d29cc990e0f5";
+			let root = "eafb765909c3ed5afd92a0c564acf4620d0234b31702e8e8e9b48da72a748838";
+
+			assert_eq!(
+				bytes2hex("", &ext.child_storage_root(child_info, state_version)),
+				child_root,
+			);
+
+			assert_eq!(bytes2hex("", &ext.storage_root(state_version)), root);
+
+			// Calling a second time should use it from the cache
+			assert_eq!(
+				bytes2hex("", &ext.child_storage_root(child_info, state_version)),
+				child_root,
+			);
+		}
 	}
 
 	#[test]
 	fn extrinsic_changes_are_collected() {
-		let mut overlay = OverlayedChanges::default();
+		let mut overlay = OverlayedChanges::<Blake2Hasher>::default();
 		overlay.set_collect_extrinsics(true);
 
 		overlay.start_transaction();
@@ -919,7 +1072,7 @@ mod tests {
 
 	#[test]
 	fn next_storage_key_change_works() {
-		let mut overlay = OverlayedChanges::default();
+		let mut overlay = OverlayedChanges::<Blake2Hasher>::default();
 		overlay.start_transaction();
 		overlay.set_storage(vec![20], Some(vec![20]));
 		overlay.set_storage(vec![30], Some(vec![30]));
@@ -960,7 +1113,7 @@ mod tests {
 		let child_info = ChildInfo::new_default(b"Child1");
 		let child_info = &child_info;
 		let child = child_info.storage_key();
-		let mut overlay = OverlayedChanges::default();
+		let mut overlay = OverlayedChanges::<Blake2Hasher>::default();
 		overlay.start_transaction();
 		overlay.set_child_storage(child_info, vec![20], Some(vec![20]));
 		overlay.set_child_storage(child_info, vec![30], Some(vec![30]));
diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs
index 78e956952f723e7a1a2fdee20340ad6a2585a139..0eb7b6d1118f9aa3cff1caee089d526b3f711d94 100644
--- a/substrate/primitives/state-machine/src/testing.rs
+++ b/substrate/primitives/state-machine/src/testing.rs
@@ -23,8 +23,8 @@ use std::{
 };
 
 use crate::{
-	backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey,
-	StorageTransactionCache, StorageValue, TrieBackendBuilder,
+	backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, StorageValue,
+	TrieBackendBuilder,
 };
 
 use hash_db::{HashDB, Hasher};
@@ -36,7 +36,7 @@ use sp_core::{
 	},
 };
 use sp_externalities::{Extension, ExtensionStore, Extensions};
-use sp_trie::StorageProof;
+use sp_trie::{PrefixedMemoryDB, StorageProof};
 
 /// Simple HashMap-based Externalities impl.
 pub struct TestExternalities<H>
@@ -45,10 +45,8 @@ where
 	H::Out: codec::Codec + Ord,
 {
 	/// The overlay changed storage.
-	overlay: OverlayedChanges,
+	overlay: OverlayedChanges<H>,
 	offchain_db: TestPersistentOffchainDB,
-	storage_transaction_cache:
-		StorageTransactionCache<<InMemoryBackend<H> as Backend<H>>::Transaction, H>,
 	/// Storage backend.
 	pub backend: InMemoryBackend<H>,
 	/// Extensions.
@@ -64,12 +62,7 @@ where
 {
 	/// Get externalities implementation.
 	pub fn ext(&mut self) -> Ext<H, InMemoryBackend<H>> {
-		Ext::new(
-			&mut self.overlay,
-			&mut self.storage_transaction_cache,
-			&self.backend,
-			Some(&mut self.extensions),
-		)
+		Ext::new(&mut self.overlay, &self.backend, Some(&mut self.extensions))
 	}
 
 	/// Create a new instance of `TestExternalities` with storage.
@@ -112,13 +105,12 @@ where
 			offchain_db,
 			extensions: Default::default(),
 			backend,
-			storage_transaction_cache: Default::default(),
 			state_version,
 		}
 	}
 
 	/// Returns the overlayed changes.
-	pub fn overlayed_changes(&self) -> &OverlayedChanges {
+	pub fn overlayed_changes(&self) -> &OverlayedChanges<H> {
 		&self.overlay
 	}
 
@@ -165,32 +157,50 @@ where
 	/// This can be used as a fast way to restore the storage state from a backup because the trie
 	/// does not need to be computed.
 	pub fn from_raw_snapshot(
-		&mut self,
-		raw_storage: Vec<(H::Out, (Vec<u8>, i32))>,
+		raw_storage: Vec<(Vec<u8>, (Vec<u8>, i32))>,
 		storage_root: H::Out,
-	) {
-		for (k, (v, ref_count)) in raw_storage {
+		state_version: StateVersion,
+	) -> Self {
+		let mut backend = PrefixedMemoryDB::default();
+
+		for (key, (v, ref_count)) in raw_storage {
+			let mut hash = H::Out::default();
+			let hash_len = hash.as_ref().len();
+
+			if key.len() < hash_len {
+				log::warn!("Invalid key in `from_raw_snapshot`: {key:?}");
+				continue
+			}
+
+			hash.as_mut().copy_from_slice(&key[(key.len() - hash_len)..]);
+
 			// Each time .emplace is called the internal MemoryDb ref count increments.
 			// Repeatedly call emplace to initialise the ref count to the correct value.
 			for _ in 0..ref_count {
-				self.backend.backend_storage_mut().emplace(k, hash_db::EMPTY_PREFIX, v.clone());
+				backend.emplace(hash, (&key[..(key.len() - hash_len)], None), v.clone());
 			}
 		}
-		self.backend.set_root(storage_root);
+
+		Self {
+			backend: TrieBackendBuilder::new(backend, storage_root).build(),
+			overlay: Default::default(),
+			offchain_db: Default::default(),
+			extensions: Default::default(),
+			state_version,
+		}
 	}
 
 	/// Drains the underlying raw storage key/values and returns the root hash.
 	///
 	/// Useful for backing up the storage in a format that can be quickly re-loaded.
-	///
-	/// Note: This DB will be inoperable after this call.
-	pub fn into_raw_snapshot(mut self) -> (Vec<(H::Out, (Vec<u8>, i32))>, H::Out) {
+	pub fn into_raw_snapshot(mut self) -> (Vec<(Vec<u8>, (Vec<u8>, i32))>, H::Out) {
 		let raw_key_values = self
 			.backend
 			.backend_storage_mut()
 			.drain()
 			.into_iter()
-			.collect::<Vec<(H::Out, (Vec<u8>, i32))>>();
+			.filter(|(_, (_, r))| *r > 0)
+			.collect::<Vec<(Vec<u8>, (Vec<u8>, i32))>>();
 
 		(raw_key_values, *self.backend.root())
 	}
@@ -220,11 +230,7 @@ where
 	///
 	/// This will panic if there are still open transactions.
 	pub fn commit_all(&mut self) -> Result<(), String> {
-		let changes = self.overlay.drain_storage_changes::<_, _>(
-			&self.backend,
-			&mut Default::default(),
-			self.state_version,
-		)?;
+		let changes = self.overlay.drain_storage_changes(&self.backend, self.state_version)?;
 
 		self.backend
 			.apply_transaction(changes.transaction_storage_root, changes.transaction);
@@ -248,12 +254,8 @@ where
 		let proving_backend = TrieBackendBuilder::wrap(&self.backend)
 			.with_recorder(Default::default())
 			.build();
-		let mut proving_ext = Ext::new(
-			&mut self.overlay,
-			&mut self.storage_transaction_cache,
-			&proving_backend,
-			Some(&mut self.extensions),
-		);
+		let mut proving_ext =
+			Ext::new(&mut self.overlay, &proving_backend, Some(&mut self.extensions));
 
 		let outcome = sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute);
 		let proof = proving_backend.extract_proof().expect("Failed to extract storage proof");
@@ -409,36 +411,25 @@ mod tests {
 		original_ext.insert_child(child_info.clone(), b"cattytown".to_vec(), b"is_dark".to_vec());
 		original_ext.insert_child(child_info.clone(), b"doggytown".to_vec(), b"is_sunny".to_vec());
 
-		// Call emplace on one of the keys to increment the MemoryDb refcount, so we can check
-		// that it is intact in the recovered_ext.
-		let keys = original_ext.backend.backend_storage_mut().keys();
-		let expected_ref_count = 5;
-		let ref_count_key = keys.into_iter().next().unwrap().0;
-		for _ in 0..expected_ref_count - 1 {
-			original_ext.backend.backend_storage_mut().emplace(
-				ref_count_key,
-				hash_db::EMPTY_PREFIX,
-				// We can use anything for the 'value' because it does not affect behavior when
-				// emplacing an existing key.
-				(&[0u8; 32]).to_vec(),
-			);
-		}
-		let refcount = original_ext
-			.backend
-			.backend_storage()
-			.raw(&ref_count_key, hash_db::EMPTY_PREFIX)
-			.unwrap()
-			.1;
-		assert_eq!(refcount, expected_ref_count);
+		// Apply the backend to itself again to increase the ref count of all nodes.
+		original_ext.backend.apply_transaction(
+			*original_ext.backend.root(),
+			original_ext.backend.clone().into_storage(),
+		);
+
+		// Ensure all have the correct ref count
+		assert!(original_ext.backend.backend_storage().keys().values().all(|r| *r == 2));
 
 		// Drain the raw storage and root.
 		let root = *original_ext.backend.root();
 		let (raw_storage, storage_root) = original_ext.into_raw_snapshot();
 
 		// Load the raw storage and root into a new TestExternalities.
-		let mut recovered_ext =
-			TestExternalities::<BlakeTwo256>::from((Default::default(), Default::default()));
-		recovered_ext.from_raw_snapshot(raw_storage, storage_root);
+		let recovered_ext = TestExternalities::<BlakeTwo256>::from_raw_snapshot(
+			raw_storage,
+			storage_root,
+			Default::default(),
+		);
 
 		// Check the storage root is the same as the original
 		assert_eq!(root, *recovered_ext.backend.root());
@@ -458,14 +449,8 @@ mod tests {
 			Some(b"is_sunny".to_vec())
 		);
 
-		// Check the refcount of the key with > 1 refcount is correct.
-		let refcount = recovered_ext
-			.backend
-			.backend_storage()
-			.raw(&ref_count_key, hash_db::EMPTY_PREFIX)
-			.unwrap()
-			.1;
-		assert_eq!(refcount, expected_ref_count);
+		// Ensure all have the correct ref count after importing
+		assert!(recovered_ext.backend.backend_storage().keys().values().all(|r| *r == 2));
 	}
 
 	#[test]
diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs
index b7940fa8c39df24e7fc21670dcda40f9d86cc8c6..cc7132181f90a0ceab30a66b2d78a54798031b25 100644
--- a/substrate/primitives/state-machine/src/trie_backend.rs
+++ b/substrate/primitives/state-machine/src/trie_backend.rs
@@ -30,6 +30,7 @@ use codec::Codec;
 use hash_db::HashDB;
 use hash_db::Hasher;
 use sp_core::storage::{ChildInfo, StateVersion};
+use sp_trie::PrefixedMemoryDB;
 #[cfg(feature = "std")]
 use sp_trie::{
 	cache::{LocalTrieCache, TrieCache},
@@ -377,7 +378,6 @@ where
 	H::Out: Ord + Codec,
 {
 	type Error = crate::DefaultError;
-	type Transaction = S::Overlay;
 	type TrieBackendStorage = S;
 	type RawIter = crate::trie_backend_essence::RawIter<S, H, C>;
 
@@ -458,7 +458,7 @@ where
 		&self,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (H::Out, Self::Transaction)
+	) -> (H::Out, PrefixedMemoryDB<H>)
 	where
 		H::Out: Ord,
 	{
@@ -470,7 +470,7 @@ where
 		child_info: &ChildInfo,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (H::Out, bool, Self::Transaction)
+	) -> (H::Out, bool, PrefixedMemoryDB<H>)
 	where
 		H::Out: Ord,
 	{
@@ -529,7 +529,7 @@ pub mod tests {
 	use sp_trie::{
 		cache::{CacheSize, SharedTrieCache},
 		trie_types::{TrieDBBuilder, TrieDBMutBuilderV0, TrieDBMutBuilderV1},
-		KeySpacedDBMut, PrefixedKey, PrefixedMemoryDB, Trie, TrieCache, TrieMut,
+		KeySpacedDBMut, PrefixedMemoryDB, Trie, TrieCache, TrieMut,
 	};
 	use std::iter;
 	use trie_db::NodeCodec;
@@ -1187,7 +1187,7 @@ pub mod tests {
 			(Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()),
 			(Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()),
 		];
-		let in_memory = new_in_mem::<BlakeTwo256, PrefixedKey<BlakeTwo256>>();
+		let in_memory = new_in_mem::<BlakeTwo256>();
 		let in_memory = in_memory.update(contents, state_version);
 		let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()];
 		let in_memory_root = in_memory
@@ -1292,7 +1292,7 @@ pub mod tests {
 					.collect(),
 			),
 		];
-		let in_memory = new_in_mem::<BlakeTwo256, PrefixedKey<BlakeTwo256>>();
+		let in_memory = new_in_mem::<BlakeTwo256>();
 		let in_memory = in_memory.update(contents, state_version);
 		let child_storage_keys = vec![child_info_1.to_owned()];
 		let in_memory_root = in_memory
@@ -1480,7 +1480,7 @@ pub mod tests {
 			(Some(child_info_1.clone()), vec![(key.clone(), Some(child_trie_1_val.clone()))]),
 			(Some(child_info_2.clone()), vec![(key.clone(), Some(child_trie_2_val.clone()))]),
 		];
-		let in_memory = new_in_mem::<BlakeTwo256, PrefixedKey<BlakeTwo256>>();
+		let in_memory = new_in_mem::<BlakeTwo256>();
 		let in_memory = in_memory.update(contents, state_version);
 		let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()];
 		let in_memory_root = in_memory
diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs
index 22c76b56deb05f0f16fafb04dbb79b149c4e23b3..4bb51f4a134370795da03d0bbfefc347d72f12cd 100644
--- a/substrate/primitives/state-machine/src/trie_backend_essence.rs
+++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs
@@ -19,7 +19,7 @@
 //! from storage.
 
 use crate::{
-	backend::{Consolidate, IterArgs, StorageIterator},
+	backend::{IterArgs, StorageIterator},
 	trie_backend::TrieCacheProvider,
 	warn, StorageKey, StorageValue,
 };
@@ -35,7 +35,8 @@ use sp_trie::{
 	child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash,
 	read_child_trie_value, read_trie_value,
 	trie_types::{TrieDBBuilder, TrieError},
-	DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBRawIterator, TrieRecorder,
+	DBValue, KeySpacedDB, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, TrieDBRawIterator,
+	TrieRecorder,
 };
 #[cfg(feature = "std")]
 use std::{collections::HashMap, sync::Arc};
@@ -621,8 +622,8 @@ where
 		&self,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (H::Out, S::Overlay) {
-		let mut write_overlay = S::Overlay::default();
+	) -> (H::Out, PrefixedMemoryDB<H>) {
+		let mut write_overlay = PrefixedMemoryDB::default();
 
 		let root = self.with_recorder_and_cache_for_storage_root(None, |recorder, cache| {
 			let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay);
@@ -654,11 +655,11 @@ where
 		child_info: &ChildInfo,
 		delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
 		state_version: StateVersion,
-	) -> (H::Out, bool, S::Overlay) {
+	) -> (H::Out, bool, PrefixedMemoryDB<H>) {
 		let default_root = match child_info.child_type() {
 			ChildType::ParentKeyId => empty_child_trie_root::<sp_trie::LayoutV1<H>>(),
 		};
-		let mut write_overlay = S::Overlay::default();
+		let mut write_overlay = PrefixedMemoryDB::default();
 		let child_root = match self.child_root(child_info) {
 			Ok(Some(hash)) => hash,
 			Ok(None) => default_root,
@@ -707,7 +708,7 @@ where
 
 pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage<H>, H: 'a + Hasher> {
 	storage: &'a S,
-	overlay: &'a mut S::Overlay,
+	overlay: &'a mut PrefixedMemoryDB<H>,
 }
 
 impl<'a, S: 'a + TrieBackendStorage<H>, H: 'a + Hasher> AsHashDB<H, DBValue>
@@ -722,7 +723,7 @@ impl<'a, S: 'a + TrieBackendStorage<H>, H: 'a + Hasher> AsHashDB<H, DBValue>
 }
 
 impl<'a, S: TrieBackendStorage<H>, H: Hasher> Ephemeral<'a, S, H> {
-	pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self {
+	pub fn new(storage: &'a S, overlay: &'a mut PrefixedMemoryDB<H>) -> Self {
 		Ephemeral { storage, overlay }
 	}
 }
@@ -768,16 +769,11 @@ impl<'a, S: 'a + TrieBackendStorage<H>, H: Hasher> HashDBRef<H, DBValue> for Eph
 
 /// Key-value pairs storage that is used by trie backend essence.
 pub trait TrieBackendStorage<H: Hasher>: Send + Sync {
-	/// Type of in-memory overlay.
-	type Overlay: HashDB<H, DBValue> + Default + Consolidate;
-
 	/// Get the value stored at key.
 	fn get(&self, key: &H::Out, prefix: Prefix) -> Result<Option<DBValue>>;
 }
 
 impl<T: TrieBackendStorage<H>, H: Hasher> TrieBackendStorage<H> for &T {
-	type Overlay = T::Overlay;
-
 	fn get(&self, key: &H::Out, prefix: Prefix) -> Result<Option<DBValue>> {
 		(*self).get(key, prefix)
 	}
@@ -786,8 +782,6 @@ impl<T: TrieBackendStorage<H>, H: Hasher> TrieBackendStorage<H> for &T {
 // This implementation is used by normal storage trie clients.
 #[cfg(feature = "std")]
 impl<H: Hasher> TrieBackendStorage<H> for Arc<dyn Storage<H>> {
-	type Overlay = sp_trie::PrefixedMemoryDB<H>;
-
 	fn get(&self, key: &H::Out, prefix: Prefix) -> Result<Option<DBValue>> {
 		Storage::<H>::get(std::ops::Deref::deref(self), key, prefix)
 	}
@@ -798,8 +792,6 @@ where
 	H: Hasher,
 	KF: sp_trie::KeyFunction<H> + Send + Sync,
 {
-	type Overlay = Self;
-
 	fn get(&self, key: &H::Out, prefix: Prefix) -> Result<Option<DBValue>> {
 		Ok(hash_db::HashDB::get(self, key, prefix))
 	}
diff --git a/substrate/test-utils/client/src/client_ext.rs b/substrate/test-utils/client/src/client_ext.rs
index a258faa5e03e39608fb757f87a8e41e84a47edd9..8efa7b5f07f8d71664a79a7b3633afc0f7ca3fb3 100644
--- a/substrate/test-utils/client/src/client_ext.rs
+++ b/substrate/test-utils/client/src/client_ext.rs
@@ -87,10 +87,9 @@ where
 
 /// This implementation is required, because of the weird api requirements around `BlockImport`.
 #[async_trait::async_trait]
-impl<Block: BlockT, T, Transaction> ClientBlockImportExt<Block> for std::sync::Arc<T>
+impl<Block: BlockT, T> ClientBlockImportExt<Block> for std::sync::Arc<T>
 where
-	for<'r> &'r T: BlockImport<Block, Error = ConsensusError, Transaction = Transaction>,
-	Transaction: Send + 'static,
+	for<'r> &'r T: BlockImport<Block, Error = ConsensusError>,
 	T: Send + Sync,
 {
 	async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> {
@@ -153,7 +152,6 @@ where
 	RA: Send,
 	B: Send + Sync,
 	E: Send,
-	<Self as BlockImport<Block>>::Transaction: Send,
 {
 	async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> {
 		let (header, extrinsics) = block.deconstruct();
diff --git a/substrate/test-utils/runtime/client/src/block_builder_ext.rs b/substrate/test-utils/runtime/client/src/block_builder_ext.rs
index a9b0d49f3543e83c9d526ea02293403abdf3da22..78863209e33e9fe98f4332b46ba60e06e12fd1ad 100644
--- a/substrate/test-utils/runtime/client/src/block_builder_ext.rs
+++ b/substrate/test-utils/runtime/client/src/block_builder_ext.rs
@@ -49,11 +49,7 @@ impl<'a, A, B> BlockBuilderExt
 	for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B>
 where
 	A: ProvideRuntimeApi<substrate_test_runtime::Block> + 'a,
-	A::Api: BlockBuilderApi<substrate_test_runtime::Block>
-		+ ApiExt<
-			substrate_test_runtime::Block,
-			StateBackend = backend::StateBackendFor<B, substrate_test_runtime::Block>,
-		>,
+	A::Api: BlockBuilderApi<substrate_test_runtime::Block> + ApiExt<substrate_test_runtime::Block>,
 	B: backend::Backend<substrate_test_runtime::Block>,
 {
 	fn push_transfer(
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index 0cc32e50956c814ea07561d406f9de87651a0d1e..b116c8556815f3eef73e08252faab9d9f586ff24 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -816,10 +816,8 @@ fn test_witness(proof: StorageProof, root: crate::Hash) {
 	let db: sp_trie::MemoryDB<crate::Hashing> = proof.into_memory_db();
 	let backend = sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build();
 	let mut overlay = sp_state_machine::OverlayedChanges::default();
-	let mut cache = sp_state_machine::StorageTransactionCache::<_, _>::default();
 	let mut ext = sp_state_machine::Ext::new(
 		&mut overlay,
-		&mut cache,
 		&backend,
 		#[cfg(feature = "std")]
 		None,
diff --git a/substrate/utils/frame/benchmarking-cli/src/block/bench.rs b/substrate/utils/frame/benchmarking-cli/src/block/bench.rs
index 960056991a190cf6c8882768543c44e66be46c2f..c9a7fb1ad33dfd3d5145b7ca671bc36d932119bc 100644
--- a/substrate/utils/frame/benchmarking-cli/src/block/bench.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/block/bench.rs
@@ -77,7 +77,7 @@ where
 		+ UsageProvider<Block>
 		+ BlockBackend<Block>
 		+ HeaderBackend<Block>,
-	C::Api: ApiExt<Block, StateBackend = BA::State> + BlockBuilderApi<Block>,
+	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 {
 	/// Returns a new [`Self`] from the arguments.
 	pub fn new(client: Arc<C>, params: BenchmarkParams) -> Self {
diff --git a/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs
index 90b71cd78c2a44e053f305d1cf3d6bdfe7dff10f..ee12c1c5dac33fc60ecec0726734d8f9482540a8 100644
--- a/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs
@@ -90,7 +90,7 @@ impl BlockCmd {
 			+ StorageProvider<Block, BA>
 			+ UsageProvider<Block>
 			+ HeaderBackend<Block>,
-		C::Api: ApiExt<Block, StateBackend = BA::State> + BlockBuilderApi<Block>,
+		C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 	{
 		// Put everything in the benchmark type to have the generic types handy.
 		Benchmark::new(client, self.params.clone()).run()
diff --git a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs
index facde14adab5908175fdf155b0d3463f43e383f2..693b9f99f08e88151df43db025dec3f19b44d2e4 100644
--- a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs
@@ -76,7 +76,7 @@ where
 	C: BlockBuilderProvider<BA, Block, C>
 		+ ProvideRuntimeApi<Block>
 		+ sp_blockchain::HeaderBackend<Block>,
-	C::Api: ApiExt<Block, StateBackend = BA::State> + BlockBuilderApi<Block>,
+	C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 {
 	/// Create a new [`Self`] from the arguments.
 	pub fn new(
diff --git a/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs
index 1001958fe0d2866a6459933d6567cab09a9cb2a2..4c3a6ed1bcd7816ba388c3d07c33fe9a8cfa7361 100644
--- a/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs
@@ -97,7 +97,7 @@ impl ExtrinsicCmd {
 		C: BlockBuilderProvider<BA, Block, C>
 			+ ProvideRuntimeApi<Block>
 			+ sp_blockchain::HeaderBackend<Block>,
-		C::Api: ApiExt<Block, StateBackend = BA::State> + BlockBuilderApi<Block>,
+		C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 	{
 		// Short circuit if --list was specified.
 		if self.params.list {
diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs
index 70e64cc2b66ad2d5d6c4a26e2950b77e0a40c4b7..5a4c37b1f6f07301766a6693fde250c769c50e58 100644
--- a/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/overhead/cmd.rs
@@ -111,7 +111,7 @@ impl OverheadCmd {
 		C: BlockBuilderProvider<BA, Block, C>
 			+ ProvideRuntimeApi<Block>
 			+ sp_blockchain::HeaderBackend<Block>,
-		C::Api: ApiExt<Block, StateBackend = BA::State> + BlockBuilderApi<Block>,
+		C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
 	{
 		if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" {
 			return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into());
diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml
index 82a258e0ec39bf3234a9c63331946e77f0e28790..75f25bf322f903910e34f993b59c7bec9eefb476 100644
--- a/substrate/utils/frame/remote-externalities/Cargo.toml
+++ b/substrate/utils/frame/remote-externalities/Cargo.toml
@@ -16,8 +16,8 @@ jsonrpsee = { version = "0.16.2", features = ["http-client"] }
 codec = { package = "parity-scale-codec", version = "3.6.1" }
 log = "0.4.17"
 serde = "1.0.163"
-frame-support = { version = "4.0.0-dev", optional = true, path = "../../../frame/support" }
 sp-core = { version = "21.0.0", path = "../../../primitives/core" }
+sp-state-machine = { version = "0.28.0", path = "../../../primitives/state-machine" }
 sp-io = { version = "23.0.0", path = "../../../primitives/io" }
 sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" }
 tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] }
@@ -29,9 +29,7 @@ spinners = "4.1.0"
 tokio-retry = "0.3.0"
 
 [dev-dependencies]
-frame-support = { version = "4.0.0-dev", path = "../../../frame/support" }
-pallet-elections-phragmen = { version = "5.0.0-dev", path = "../../../frame/elections-phragmen" }
-tracing-subscriber = { version = "0.3.16", features = ["env-filter"] }
+sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" }
 
 [features]
-remote-test = ["frame-support"]
+remote-test = []
diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs
index 761f3c88590466937376e29a18ce6bdfcd726793..072ea6ef5e5970a6fe377e742e04d623f0c34607 100644
--- a/substrate/utils/frame/remote-externalities/src/lib.rs
+++ b/substrate/utils/frame/remote-externalities/src/lib.rs
@@ -36,10 +36,12 @@ use sp_core::{
 		well_known_keys::{is_default_child_storage_key, DEFAULT_CHILD_STORAGE_KEY_PREFIX},
 		ChildInfo, ChildType, PrefixedStorageKey, StorageData, StorageKey,
 	},
-	H256,
 };
-pub use sp_io::TestExternalities;
-use sp_runtime::{traits::Block as BlockT, StateVersion};
+use sp_runtime::{
+	traits::{Block as BlockT, HashingFor},
+	StateVersion,
+};
+use sp_state_machine::TestExternalities;
 use spinners::{Spinner, Spinners};
 use std::{
 	cmp::max,
@@ -58,7 +60,7 @@ type SnapshotVersion = Compact<u16>;
 
 const LOG_TARGET: &str = "remote-ext";
 const DEFAULT_HTTP_ENDPOINT: &str = "https://rpc.polkadot.io:443";
-const SNAPSHOT_VERSION: SnapshotVersion = Compact(2);
+const SNAPSHOT_VERSION: SnapshotVersion = Compact(3);
 
 /// The snapshot that we store on disk.
 #[derive(Decode, Encode)]
@@ -67,16 +69,16 @@ struct Snapshot<B: BlockT> {
 	state_version: StateVersion,
 	block_hash: B::Hash,
 	// <Vec<Key, (Value, MemoryDbRefCount)>>
-	raw_storage: Vec<(H256, (Vec<u8>, i32))>,
-	storage_root: H256,
+	raw_storage: Vec<(Vec<u8>, (Vec<u8>, i32))>,
+	storage_root: B::Hash,
 }
 
 impl<B: BlockT> Snapshot<B> {
 	pub fn new(
 		state_version: StateVersion,
 		block_hash: B::Hash,
-		raw_storage: Vec<(H256, (Vec<u8>, i32))>,
-		storage_root: H256,
+		raw_storage: Vec<(Vec<u8>, (Vec<u8>, i32))>,
+		storage_root: B::Hash,
 	) -> Self {
 		Self {
 			snapshot_version: SNAPSHOT_VERSION,
@@ -91,21 +93,14 @@ impl<B: BlockT> Snapshot<B> {
 		let bytes = fs::read(path).map_err(|_| "fs::read failed.")?;
 		// The first item in the SCALE encoded struct bytes is the snapshot version. We decode and
 		// check that first, before proceeding to decode the rest of the snapshot.
-		let maybe_version: Result<SnapshotVersion, _> = Decode::decode(&mut &*bytes);
-		match maybe_version {
-			Ok(snapshot_version) => {
-				if snapshot_version != SNAPSHOT_VERSION {
-					return Err(
-						"Unsupported snapshot version detected. Please create a new snapshot.",
-					)
-				}
-				match Decode::decode(&mut &*bytes) {
-					Ok(snapshot) => return Ok(snapshot),
-					Err(_) => Err("Decode failed"),
-				}
-			},
-			Err(_) => Err("Decode failed"),
+		let snapshot_version = SnapshotVersion::decode(&mut &*bytes)
+			.map_err(|_| "Failed to decode snapshot version")?;
+
+		if snapshot_version != SNAPSHOT_VERSION {
+			return Err("Unsupported snapshot version detected. Please create a new snapshot.")
 		}
+
+		Decode::decode(&mut &*bytes).map_err(|_| "Decode failed")
 	}
 }
 
@@ -113,13 +108,13 @@ impl<B: BlockT> Snapshot<B> {
 /// bits and pieces to it, and can be loaded remotely.
 pub struct RemoteExternalities<B: BlockT> {
 	/// The inner externalities.
-	pub inner_ext: TestExternalities,
+	pub inner_ext: TestExternalities<HashingFor<B>>,
 	/// The block hash it which we created this externality env.
 	pub block_hash: B::Hash,
 }
 
 impl<B: BlockT> Deref for RemoteExternalities<B> {
-	type Target = TestExternalities;
+	type Target = TestExternalities<HashingFor<B>>;
 	fn deref(&self) -> &Self::Target {
 		&self.inner_ext
 	}
@@ -319,8 +314,6 @@ pub struct Builder<B: BlockT> {
 	overwrite_state_version: Option<StateVersion>,
 }
 
-// NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for
-// that.
 impl<B: BlockT> Default for Builder<B> {
 	fn default() -> Self {
 		Self {
@@ -576,7 +569,7 @@ where
 		&self,
 		prefix: StorageKey,
 		at: B::Hash,
-		pending_ext: &mut TestExternalities,
+		pending_ext: &mut TestExternalities<HashingFor<B>>,
 	) -> Result<Vec<KeyValue>, &'static str> {
 		let start = Instant::now();
 		let mut sp = Spinner::with_timer(Spinners::Dots, "Scraping keys...".into());
@@ -768,7 +761,7 @@ where
 	async fn load_child_remote(
 		&self,
 		top_kv: &[KeyValue],
-		pending_ext: &mut TestExternalities,
+		pending_ext: &mut TestExternalities<HashingFor<B>>,
 	) -> Result<ChildKeyValues, &'static str> {
 		let child_roots = top_kv
 			.into_iter()
@@ -826,7 +819,7 @@ where
 	/// cache, we can also optimize further.
 	async fn load_top_remote(
 		&self,
-		pending_ext: &mut TestExternalities,
+		pending_ext: &mut TestExternalities<HashingFor<B>>,
 	) -> Result<TopKeyValues, &'static str> {
 		let config = self.as_online();
 		let at = self
@@ -926,7 +919,9 @@ where
 	/// `load_child_remote`.
 	///
 	/// Must be called after `init_remote_client`.
-	async fn load_remote_and_maybe_save(&mut self) -> Result<TestExternalities, &'static str> {
+	async fn load_remote_and_maybe_save(
+		&mut self,
+	) -> Result<TestExternalities<HashingFor<B>>, &'static str> {
 		let state_version =
 			StateApi::<B::Hash>::runtime_version(self.as_online().rpc_client(), None)
 				.await
@@ -966,13 +961,11 @@ where
 			std::fs::write(path, encoded).map_err(|_| "fs::write failed")?;
 
 			// pending_ext was consumed when creating the snapshot, need to reinitailize it
-			let mut pending_ext = TestExternalities::new_with_code_and_state(
-				Default::default(),
-				Default::default(),
+			return Ok(TestExternalities::from_raw_snapshot(
+				raw_storage,
+				storage_root,
 				self.overwrite_state_version.unwrap_or(state_version),
-			);
-			pending_ext.from_raw_snapshot(raw_storage, storage_root);
-			return Ok(pending_ext)
+			))
 		}
 
 		Ok(pending_ext)
@@ -995,12 +988,11 @@ where
 		let Snapshot { snapshot_version: _, block_hash, state_version, raw_storage, storage_root } =
 			Snapshot::<B>::load(&config.state_snapshot.path)?;
 
-		let mut inner_ext = TestExternalities::new_with_code_and_state(
-			Default::default(),
-			Default::default(),
+		let inner_ext = TestExternalities::from_raw_snapshot(
+			raw_storage,
+			storage_root,
 			self.overwrite_state_version.unwrap_or(state_version),
 		);
-		inner_ext.from_raw_snapshot(raw_storage, storage_root);
 		sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32()));
 
 		Ok(RemoteExternalities { inner_ext, block_hash })
@@ -1099,17 +1091,12 @@ where
 
 #[cfg(test)]
 mod test_prelude {
-	use tracing_subscriber::EnvFilter;
-
 	pub(crate) use super::*;
 	pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash};
 	pub(crate) type Block = RawBlock<ExtrinsicWrapper<Hash>>;
 
 	pub(crate) fn init_logger() {
-		let _ = tracing_subscriber::fmt()
-			.with_env_filter(EnvFilter::from_default_env())
-			.with_level(true)
-			.try_init();
+		let _ = sp_tracing::try_init_simple();
 	}
 }
 
@@ -1369,9 +1356,6 @@ mod remote_tests {
 			.filter(|p| p.path().file_name().unwrap_or_default() == CACHE)
 			.collect::<Vec<_>>();
 
-		let snap: Snapshot<Block> = Builder::<Block>::new().load_snapshot(CACHE.into()).unwrap();
-		assert!(matches!(snap, Snapshot { raw_storage, .. } if raw_storage.len() > 0));
-
 		assert!(to_delete.len() == 1);
 		let to_delete = to_delete.first().unwrap();
 		assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1);
@@ -1401,9 +1385,6 @@ mod remote_tests {
 			.filter(|p| p.path().file_name().unwrap_or_default() == CACHE)
 			.collect::<Vec<_>>();
 
-		let snap: Snapshot<Block> = Builder::<Block>::new().load_snapshot(CACHE.into()).unwrap();
-		assert!(matches!(snap, Snapshot { raw_storage, .. } if raw_storage.len() > 0));
-
 		assert!(to_delete.len() == 1);
 		let to_delete = to_delete.first().unwrap();
 		assert!(std::fs::metadata(to_delete.path()).unwrap().size() > 1);
diff --git a/substrate/utils/frame/remote-externalities/test_data/proxy_test b/substrate/utils/frame/remote-externalities/test_data/proxy_test
index de8105ee152df9e6f0970243f7789d37c1461734..f0b1b4f5af40bc8a159c9ee250bee7849cababae 100644
Binary files a/substrate/utils/frame/remote-externalities/test_data/proxy_test and b/substrate/utils/frame/remote-externalities/test_data/proxy_test differ
diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs b/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs
index ea3ae532ca74d1649564a47aead9028d4d087489..f1dee16debe7339edc4f3cc1ba01829f2fbacc99 100644
--- a/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs
+++ b/substrate/utils/frame/try-runtime/cli/src/commands/fast_forward.rs
@@ -25,11 +25,11 @@ use sc_executor::{sp_wasm_interface::HostFunctions, WasmExecutor};
 use serde::de::DeserializeOwned;
 use sp_core::H256;
 use sp_inherents::{InherentData, InherentDataProvider};
-use sp_io::TestExternalities;
 use sp_runtime::{
-	traits::{Header, NumberFor, One},
+	traits::{HashingFor, Header, NumberFor, One},
 	Digest,
 };
+use sp_state_machine::TestExternalities;
 use std::{fmt::Debug, str::FromStr};
 use substrate_rpc_client::{ws_client, ChainApi};
 
@@ -92,8 +92,8 @@ where
 }
 
 /// Call `method` with `data` and return the result. `externalities` will not change.
-async fn dry_run<T: Decode, Block: BlockT, HostFns: HostFunctions>(
-	externalities: &TestExternalities,
+fn dry_run<T: Decode, Block: BlockT, HostFns: HostFunctions>(
+	externalities: &TestExternalities<HashingFor<Block>>,
 	executor: &WasmExecutor<HostFns>,
 	method: &'static str,
 	data: &[u8],
@@ -111,7 +111,7 @@ async fn dry_run<T: Decode, Block: BlockT, HostFns: HostFunctions>(
 
 /// Call `method` with `data` and actually save storage changes to `externalities`.
 async fn run<Block: BlockT, HostFns: HostFunctions>(
-	externalities: &mut TestExternalities,
+	externalities: &mut TestExternalities<HashingFor<Block>>,
 	executor: &WasmExecutor<HostFns>,
 	method: &'static str,
 	data: &[u8],
@@ -124,11 +124,8 @@ async fn run<Block: BlockT, HostFns: HostFunctions>(
 		full_extensions(executor.clone()),
 	)?;
 
-	let storage_changes = changes.drain_storage_changes(
-		&externalities.backend,
-		&mut Default::default(),
-		externalities.state_version,
-	)?;
+	let storage_changes =
+		changes.drain_storage_changes(&externalities.backend, externalities.state_version)?;
 
 	externalities
 		.backend
@@ -143,7 +140,7 @@ async fn next_empty_block<
 	HostFns: HostFunctions,
 	BBIP: BlockBuildingInfoProvider<Block, Option<(InherentData, Digest)>>,
 >(
-	externalities: &mut TestExternalities,
+	externalities: &mut TestExternalities<HashingFor<Block>>,
 	executor: &WasmExecutor<HostFns>,
 	parent_height: NumberFor<Block>,
 	parent_hash: Block::Hash,
@@ -182,8 +179,7 @@ async fn next_empty_block<
 			executor,
 			"BlockBuilder_inherent_extrinsics",
 			&inherent_data.encode(),
-		)
-		.await?;
+		)?;
 	}
 
 	for xt in &extrinsics {
@@ -196,8 +192,7 @@ async fn next_empty_block<
 		executor,
 		"BlockBuilder_finalize_block",
 		&[0u8; 0],
-	)
-	.await?;
+	)?;
 
 	run::<Block, _>(externalities, executor, "BlockBuilder_finalize_block", &[0u8; 0]).await?;
 
diff --git a/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
index cfcdb4654c8c53573d1f4e71385ddb574dd777d2..53db5e64346326944554b82dcb572f3fe0299e7c 100644
--- a/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
+++ b/substrate/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
@@ -177,7 +177,6 @@ where
 		let storage_changes = changes
 			.drain_storage_changes(
 				&state_ext.backend,
-				&mut Default::default(),
 				// Note that in case a block contains a runtime upgrade, state version could
 				// potentially be incorrect here, this is very niche and would only result in
 				// unaligned roots, so this use case is ignored for now.
diff --git a/substrate/utils/frame/try-runtime/cli/src/lib.rs b/substrate/utils/frame/try-runtime/cli/src/lib.rs
index 2adf6658795558ad5a2a9474df124bd572266cf8..73952ce816af426e8ff6749879269cd2fe378abe 100644
--- a/substrate/utils/frame/try-runtime/cli/src/lib.rs
+++ b/substrate/utils/frame/try-runtime/cli/src/lib.rs
@@ -28,7 +28,6 @@ use crate::block_building_info::BlockBuildingInfoProvider;
 use parity_scale_codec::Decode;
 use remote_externalities::{
 	Builder, Mode, OfflineConfig, OnlineConfig, RemoteExternalities, SnapshotConfig,
-	TestExternalities,
 };
 use sc_cli::{
 	execution_method_from_cli, CliConfiguration, RuntimeVersion, WasmExecutionMethod,
@@ -38,7 +37,6 @@ use sc_cli::{
 use sc_executor::{
 	sp_wasm_interface::HostFunctions, HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY,
 };
-use sp_api::HashT;
 use sp_core::{
 	hexdisplay::HexDisplay,
 	offchain::{
@@ -53,10 +51,12 @@ use sp_externalities::Extensions;
 use sp_inherents::InherentData;
 use sp_keystore::{testing::MemoryKeystore, KeystoreExt};
 use sp_runtime::{
-	traits::{BlakeTwo256, Block as BlockT, NumberFor},
+	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, HashingFor, NumberFor},
 	DeserializeOwned, Digest,
 };
-use sp_state_machine::{CompactProof, OverlayedChanges, StateMachine, TrieBackendBuilder};
+use sp_state_machine::{
+	CompactProof, OverlayedChanges, StateMachine, TestExternalities, TrieBackendBuilder,
+};
 use sp_version::StateVersion;
 use std::{fmt::Debug, path::PathBuf, str::FromStr};
 
@@ -514,7 +514,7 @@ pub(crate) fn build_executor<H: HostFunctions>(shared: &SharedParams) -> WasmExe
 /// Ensure that the given `ext` is compiled with `try-runtime`
 fn ensure_try_runtime<Block: BlockT, HostFns: HostFunctions>(
 	executor: &WasmExecutor<HostFns>,
-	ext: &mut TestExternalities,
+	ext: &mut TestExternalities<HashingFor<Block>>,
 ) -> bool {
 	use sp_api::RuntimeApiInfo;
 	let final_code = ext
@@ -532,12 +532,12 @@ fn ensure_try_runtime<Block: BlockT, HostFns: HostFunctions>(
 /// Execute the given `method` and `data` on top of `ext`, returning the results (encoded) and the
 /// state `changes`.
 pub(crate) fn state_machine_call<Block: BlockT, HostFns: HostFunctions>(
-	ext: &TestExternalities,
+	ext: &TestExternalities<HashingFor<Block>>,
 	executor: &WasmExecutor<HostFns>,
 	method: &'static str,
 	data: &[u8],
 	mut extensions: Extensions,
-) -> sc_cli::Result<(OverlayedChanges, Vec<u8>)> {
+) -> sc_cli::Result<(OverlayedChanges<HashingFor<Block>>, Vec<u8>)> {
 	let mut changes = Default::default();
 	let encoded_results = StateMachine::new(
 		&ext.backend,
@@ -561,13 +561,13 @@ pub(crate) fn state_machine_call<Block: BlockT, HostFns: HostFunctions>(
 ///
 /// Make sure [`LOG_TARGET`] is enabled in logging.
 pub(crate) fn state_machine_call_with_proof<Block: BlockT, HostFns: HostFunctions>(
-	ext: &TestExternalities,
+	ext: &TestExternalities<HashingFor<Block>>,
 	executor: &WasmExecutor<HostFns>,
 	method: &'static str,
 	data: &[u8],
 	mut extensions: Extensions,
 	maybe_export_proof: Option<PathBuf>,
-) -> sc_cli::Result<(OverlayedChanges, Vec<u8>)> {
+) -> sc_cli::Result<(OverlayedChanges<HashingFor<Block>>, Vec<u8>)> {
 	use parity_scale_codec::Encode;
 
 	let mut changes = Default::default();
@@ -624,7 +624,7 @@ pub(crate) fn state_machine_call_with_proof<Block: BlockT, HostFns: HostFunction
 	let proof_size = proof.encoded_size();
 	let compact_proof = proof
 		.clone()
-		.into_compact_proof::<sp_runtime::traits::BlakeTwo256>(pre_root)
+		.into_compact_proof::<HashingFor<Block>>(pre_root)
 		.map_err(|e| {
 			log::error!(target: LOG_TARGET, "failed to generate compact proof {}: {:?}", method, e);
 			e