From 5eb816d7a685826282632842dbaec7c89d284333 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?= <git@kchr.de>
Date: Tue, 11 Jul 2023 16:21:38 +0200
Subject: [PATCH] Removal of execution strategies (#14387)

* Start

* More work!

* Moar

* More changes

* More fixes

* More work

* More fixes

* More fixes to make it compile

* Adds `NoOffchainStorage`

* Pass the extensions

* Small basti making small progress

* Fix merge errors and remove `ExecutionContext`

* Move registration of `ReadRuntimeVersionExt` to `ExecutionExtensions`

Instead of registering `ReadRuntimeVersionExt` in `sp-state-machine`, it is now registered by
`ExecutionExtensions`, which provides the default extensions.
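
A minimal sketch (not part of the diff) of the resulting API, using only the signatures visible
in this patch; `default_extensions_for` is a hypothetical helper name:

```rust
use std::sync::Arc;

use sc_client_api::execution_extensions::ExecutionExtensions;
use sp_core::traits::ReadRuntimeVersion;
use sp_externalities::Extensions;
use sp_runtime::traits::{Block as BlockT, NumberFor};

fn default_extensions_for<Block: BlockT>(
	read_runtime_version: Arc<dyn ReadRuntimeVersion>,
	block_hash: Block::Hash,
	block_number: NumberFor<Block>,
) -> Extensions {
	// `None` means "no custom `ExtensionsFactory`"; the default unit factory is used.
	let execution_extensions = ExecutionExtensions::<Block>::new(None, read_runtime_version);

	// The returned set already contains `ReadRuntimeVersionExt`, so callers no
	// longer depend on `sp-state-machine` registering it.
	execution_extensions.extensions(block_hash, block_number)
}
```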

* Fix compilation

* Register the global extensions inside runtime api instance

* Fixes

* Fix `generate_initial_session_keys` by passing the keystore extension

* Fix the grandpa tests

* Fix more tests

* Fix more tests

* Don't set any heap pages if there isn't an override

* Fix small fallout

* FMT

* Fix tests

* More tests

* Offchain worker custom extensions

* More fixes

* Make offchain tx pool creation reusable

Introduces an `OffchainTransactionPoolFactory` for creating offchain transaction pools that can be
registered in the runtime externalities context. This factory will be required in a later PR to
make the creation of offchain transaction pools easier.
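
A minimal sketch (not part of the diff), assuming only the factory API visible in this patch;
`register_offchain_tx_pool` is a hypothetical helper name:

```rust
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_externalities::Extensions;
use sp_runtime::traits::Block as BlockT;

fn register_offchain_tx_pool<Block: BlockT>(
	factory: &OffchainTransactionPoolFactory<Block>,
	block_hash: Block::Hash,
	extensions: &mut Extensions,
) {
	// The factory is created once from the node's transaction pool, e.g.
	// `OffchainTransactionPoolFactory::new(transaction_pool.clone())`, and a
	// per-block transaction pool extension is registered for every runtime
	// call that may submit transactions.
	extensions.register(factory.offchain_transaction_pool(block_hash));
}
```

Service code hands clones of the factory to BABE, GRANDPA and the offchain workers, as the hunks
below show.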

* Fixes

* Fixes

* Set offchain transaction pool in BABE before using it in the runtime

* Add the `offchain_tx_pool` to Grandpa as well

* Fix the nodes

* Print a warning when the deprecated `--execution` arguments are used

* Fix merge issues

* Fix compilation

* Rename `babe_link`

* Rename to `offchain_tx_pool_factory`

* Cleanup

* FMT

* Fix benchmark name

* Fix `try-runtime`

* Remove `--execution` CLI args

* Make clippy happy

* Forward bls functions

* Fix docs

* Update UI tests

* Update client/api/src/execution_extensions.rs

Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>

* Apply suggestions from code review

Co-authored-by: Koute <koute@users.noreply.github.com>

* Update client/cli/src/params/import_params.rs

Co-authored-by: Koute <koute@users.noreply.github.com>

* Update client/api/src/execution_extensions.rs

Co-authored-by: Koute <koute@users.noreply.github.com>

* Pass the offchain storage to the MMR RPC

* Update client/api/src/execution_extensions.rs

Co-authored-by: Sebastian Kunert <skunert49@gmail.com>

* Review comments

* Fixes

---------

Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Co-authored-by: Koute <koute@users.noreply.github.com>
Co-authored-by: Sebastian Kunert <skunert49@gmail.com>
---
 substrate/Cargo.lock                          |  17 +-
 substrate/bin/node-template/node/Cargo.toml   |   2 +
 .../bin/node-template/node/src/service.rs     |  29 +-
 .../pallets/template/src/weights.rs           |   1 -
 substrate/bin/node/bench/src/construct.rs     |  16 +-
 substrate/bin/node/bench/src/import.rs        |  23 +-
 substrate/bin/node/bench/src/main.rs          |  37 ++-
 substrate/bin/node/bench/src/txpool.rs        |   4 +-
 substrate/bin/node/cli/Cargo.toml             |   1 +
 .../bin/node/cli/benches/block_production.rs  |  11 -
 .../bin/node/cli/benches/transaction_pool.rs  |   9 -
 substrate/bin/node/cli/src/service.rs         |  80 ++++--
 .../node/cli/tests/benchmark_block_works.rs   |   2 +-
 substrate/bin/node/rpc/src/lib.rs             |  35 ++-
 substrate/bin/node/testing/src/bench.rs       |  65 +----
 substrate/client/api/Cargo.toml               |   1 -
 substrate/client/api/src/call_executor.rs     |   9 +-
 .../client/api/src/execution_extensions.rs    | 187 +-----------
 substrate/client/api/src/lib.rs               |   2 +-
 substrate/client/block-builder/src/lib.rs     |  34 +--
 substrate/client/cli/src/arg_enums.rs         |  24 --
 substrate/client/cli/src/config.rs            |  17 --
 .../client/cli/src/params/import_params.rs    |  56 ++--
 substrate/client/cli/src/runner.rs            |   1 -
 .../client/consensus/aura/src/import_queue.rs |   6 +-
 substrate/client/consensus/babe/Cargo.toml    |   1 +
 .../client/consensus/babe/rpc/Cargo.toml      |   1 +
 .../client/consensus/babe/rpc/src/lib.rs      |  27 +-
 substrate/client/consensus/babe/src/lib.rs    |  76 +++--
 substrate/client/consensus/babe/src/tests.rs  |   4 +
 substrate/client/consensus/grandpa/Cargo.toml |   1 +
 .../consensus/grandpa/src/environment.rs      |  12 +-
 substrate/client/consensus/grandpa/src/lib.rs |  16 +-
 .../client/consensus/grandpa/src/tests.rs     |  22 +-
 substrate/client/consensus/pow/src/lib.rs     |   5 +-
 .../merkle-mountain-range/rpc/src/lib.rs      |  47 ++--
 substrate/client/offchain/Cargo.toml          |   5 +
 substrate/client/offchain/src/api.rs          | 114 +-------
 substrate/client/offchain/src/lib.rs          | 265 ++++++++++++------
 substrate/client/rpc-spec-v2/Cargo.toml       |   1 +
 .../rpc-spec-v2/src/chain_head/chain_head.rs  |   8 +-
 .../src/chain_head/subscription/inner.rs      |   1 -
 .../rpc-spec-v2/src/chain_head/test_utils.rs  |   8 +
 .../rpc-spec-v2/src/chain_head/tests.rs       |   4 +-
 substrate/client/rpc/Cargo.toml               |   1 +
 substrate/client/rpc/src/author/mod.rs        |  11 +-
 substrate/client/rpc/src/author/tests.rs      |   3 +-
 substrate/client/rpc/src/state/state_full.rs  |   8 +-
 substrate/client/rpc/src/state/tests.rs       |   2 +-
 substrate/client/service/Cargo.toml           |   1 -
 substrate/client/service/src/builder.rs       |  47 +---
 .../service/src/client/call_executor.rs       |  54 +---
 substrate/client/service/src/client/client.rs |  37 +--
 substrate/client/service/src/config.rs        |   3 -
 substrate/client/service/src/lib.rs           |   4 +-
 .../client/service/test/src/client/mod.rs     |  60 ++--
 substrate/client/service/test/src/lib.rs      |   1 -
 substrate/client/statement-store/src/lib.rs   |  12 +-
 .../client/transaction-pool/api/src/lib.rs    |  58 +++-
 substrate/client/transaction-pool/src/lib.rs  |   9 +-
 substrate/frame/benchmarking/README.md        |   1 -
 substrate/frame/nfts/src/weights.rs           |   1 -
 substrate/primitives/api/Cargo.toml           |   2 +
 .../api/proc-macro/src/decl_runtime_apis.rs   |  34 +--
 .../api/proc-macro/src/impl_runtime_apis.rs   |  39 ++-
 .../proc-macro/src/mock_impl_runtime_apis.rs  |  49 +---
 substrate/primitives/api/src/lib.rs           |  29 +-
 .../primitives/api/test/benches/bench.rs      |  11 +-
 .../api/test/tests/runtime_calls.rs           |  39 +--
 .../tests/ui/mock_only_self_reference.stderr  |  39 ---
 .../application-crypto/test/src/ecdsa.rs      |  13 +-
 .../application-crypto/test/src/ed25519.rs    |  13 +-
 .../application-crypto/test/src/sr25519.rs    |  13 +-
 .../primitives/consensus/common/src/lib.rs    |  10 -
 substrate/primitives/core/Cargo.toml          |   2 +
 substrate/primitives/core/src/lib.rs          |  39 ---
 substrate/primitives/core/src/offchain/mod.rs |  26 +-
 .../primitives/core/src/offchain/storage.rs   |  96 ++++++-
 .../externalities/src/extensions.rs           |  14 +
 substrate/primitives/keystore/src/lib.rs      | 171 ++++++++++-
 .../primitives/runtime/src/runtime_logger.rs  |   7 +-
 substrate/primitives/session/Cargo.toml       |   2 +
 substrate/primitives/session/src/lib.rs       |   7 +-
 substrate/primitives/state-machine/src/lib.rs | 255 ++---------------
 substrate/scripts/ci/gitlab/pipeline/test.yml |   6 +-
 substrate/scripts/run_all_benchmarks.sh       |   2 -
 substrate/test-utils/client/src/lib.rs        |  35 +--
 .../test-utils/runtime/src/genesismap.rs      |   7 +-
 substrate/test-utils/runtime/src/lib.rs       |  58 ++--
 .../runtime/src/substrate_test_pallet.rs      |   4 +-
 .../frame/benchmarking-cli/src/block/cmd.rs   |   4 +-
 .../benchmarking-cli/src/overhead/README.md   |   5 +-
 .../benchmarking-cli/src/pallet/command.rs    |  21 +-
 .../frame/benchmarking-cli/src/pallet/mod.rs  |   8 +-
 .../benchmarking-cli/src/pallet/writer.rs     |   2 -
 .../utils/frame/try-runtime/cli/src/lib.rs    |  12 +-
 96 files changed, 1175 insertions(+), 1499 deletions(-)

diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock
index 76f3459b166..0fb4223f539 100644
--- a/substrate/Cargo.lock
+++ b/substrate/Cargo.lock
@@ -5415,6 +5415,7 @@ dependencies = [
  "sc-network-common",
  "sc-network-statement",
  "sc-network-sync",
+ "sc-offchain",
  "sc-rpc",
  "sc-service",
  "sc-service-test",
@@ -5577,8 +5578,10 @@ dependencies = [
  "sc-consensus-grandpa",
  "sc-executor",
  "sc-network",
+ "sc-offchain",
  "sc-rpc-api",
  "sc-service",
+ "sc-statement-store",
  "sc-telemetry",
  "sc-transaction-pool",
  "sc-transaction-pool-api",
@@ -9087,7 +9090,6 @@ dependencies = [
  "sp-core",
  "sp-database",
  "sp-externalities",
- "sp-keystore",
  "sp-runtime",
  "sp-state-machine",
  "sp-statement-store",
@@ -9216,6 +9218,7 @@ dependencies = [
  "sc-network",
  "sc-network-test",
  "sc-telemetry",
+ "sc-transaction-pool-api",
  "scale-info",
  "sp-api",
  "sp-application-crypto",
@@ -9248,6 +9251,7 @@ dependencies = [
  "sc-consensus-epochs",
  "sc-keystore",
  "sc-rpc-api",
+ "sc-transaction-pool-api",
  "serde",
  "serde_json",
  "sp-api",
@@ -9365,6 +9369,7 @@ dependencies = [
  "sc-network-gossip",
  "sc-network-test",
  "sc-telemetry",
+ "sc-transaction-pool-api",
  "sc-utils",
  "serde",
  "serde_json",
@@ -9846,6 +9851,7 @@ dependencies = [
  "hyper-rustls 0.24.0",
  "lazy_static",
  "libp2p",
+ "log",
  "num_cpus",
  "once_cell",
  "parity-scale-codec",
@@ -9855,12 +9861,15 @@ dependencies = [
  "sc-client-api",
  "sc-client-db",
  "sc-network",
+ "sc-network-common",
  "sc-transaction-pool",
  "sc-transaction-pool-api",
  "sc-utils",
  "sp-api",
  "sp-consensus",
  "sp-core",
+ "sp-externalities",
+ "sp-keystore",
  "sp-offchain",
  "sp-runtime",
  "sp-tracing",
@@ -9889,6 +9898,7 @@ dependencies = [
  "log",
  "parity-scale-codec",
  "parking_lot 0.12.1",
+ "pretty_assertions",
  "sc-block-builder",
  "sc-chain-spec",
  "sc-client-api",
@@ -9961,6 +9971,7 @@ dependencies = [
  "log",
  "parity-scale-codec",
  "parking_lot 0.12.1",
+ "pretty_assertions",
  "sc-block-builder",
  "sc-chain-spec",
  "sc-client-api",
@@ -10024,7 +10035,6 @@ dependencies = [
  "sc-network-light",
  "sc-network-sync",
  "sc-network-transactions",
- "sc-offchain",
  "sc-rpc",
  "sc-rpc-server",
  "sc-rpc-spec-v2",
@@ -10770,6 +10780,7 @@ dependencies = [
  "scale-info",
  "sp-api-proc-macro",
  "sp-core",
+ "sp-externalities",
  "sp-metadata-ir",
  "sp-runtime",
  "sp-state-machine",
@@ -11153,6 +11164,7 @@ dependencies = [
  "substrate-bip39",
  "thiserror",
  "tiny-bip39",
+ "tracing",
  "w3f-bls",
  "zeroize",
 ]
@@ -11500,6 +11512,7 @@ dependencies = [
  "scale-info",
  "sp-api",
  "sp-core",
+ "sp-keystore",
  "sp-runtime",
  "sp-staking",
  "sp-std",
diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml
index a1d1fe1848c..39f60f52c83 100644
--- a/substrate/bin/node-template/node/Cargo.toml
+++ b/substrate/bin/node-template/node/Cargo.toml
@@ -28,6 +28,8 @@ sc-service = { version = "0.10.0-dev", path = "../../../client/service" }
 sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" }
 sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" }
 sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" }
+sc-offchain = { version = "4.0.0-dev", path = "../../../client/offchain" }
+sc-statement-store = { version = "4.0.0-dev", path = "../../../client/statement-store" }
 sc-consensus-aura = { version = "0.10.0-dev", path = "../../../client/consensus/aura" }
 sp-consensus-aura = { version = "0.10.0-dev", path = "../../../primitives/consensus/aura" }
 sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" }
diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs
index 07006925fbb..355e24c9562 100644
--- a/substrate/bin/node-template/node/src/service.rs
+++ b/substrate/bin/node-template/node/src/service.rs
@@ -1,12 +1,14 @@
 //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
 
+use futures::FutureExt;
 use node_template_runtime::{self, opaque::Block, RuntimeApi};
-use sc_client_api::BlockBackend;
+use sc_client_api::{Backend, BlockBackend};
 use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
 use sc_consensus_grandpa::SharedVoterState;
 pub use sc_executor::NativeElseWasmExecutor;
 use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams};
 use sc_telemetry::{Telemetry, TelemetryWorker};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
 use std::{sync::Arc, time::Duration};
 
@@ -179,11 +181,23 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 		})?;
 
 	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(
-			&config,
-			task_manager.spawn_handle(),
-			client.clone(),
-			network.clone(),
+		task_manager.spawn_handle().spawn(
+			"offchain-workers-runner",
+			"offchain-worker",
+			sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
+				runtime_api_provider: client.clone(),
+				is_validator: config.role.is_authority(),
+				keystore: Some(keystore_container.keystore()),
+				offchain_db: backend.offchain_storage(),
+				transaction_pool: Some(OffchainTransactionPoolFactory::new(
+					transaction_pool.clone(),
+				)),
+				network_provider: network.clone(),
+				enable_http_requests: true,
+				custom_extensions: |_| vec![],
+			})
+			.run(client.clone(), task_manager.spawn_handle())
+			.boxed(),
 		);
 	}
 
@@ -224,7 +238,7 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
 			task_manager.spawn_handle(),
 			client.clone(),
-			transaction_pool,
+			transaction_pool.clone(),
 			prometheus_registry.as_ref(),
 			telemetry.as_ref().map(|x| x.handle()),
 		);
@@ -300,6 +314,7 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
 			prometheus_registry,
 			shared_voter_state: SharedVoterState::empty(),
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool),
 		};
 
 		// the GRANDPA voter task is considered infallible, i.e.
diff --git a/substrate/bin/node-template/pallets/template/src/weights.rs b/substrate/bin/node-template/pallets/template/src/weights.rs
index e8fbc09bad8..7c42936e09f 100644
--- a/substrate/bin/node-template/pallets/template/src/weights.rs
+++ b/substrate/bin/node-template/pallets/template/src/weights.rs
@@ -19,7 +19,6 @@
 // *
 // --steps=50
 // --repeat=20
-// --execution=wasm
 // --wasm-execution=compiled
 // --output
 // pallets/template/src/weights.rs
diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs
index ec2a829f692..4f3ca07f86b 100644
--- a/substrate/bin/node/bench/src/construct.rs
+++ b/substrate/bin/node/bench/src/construct.rs
@@ -28,7 +28,7 @@ use futures::Future;
 use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc};
 
 use node_primitives::Block;
-use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile};
+use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
 use sc_transaction_pool_api::{
 	ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor,
 	TransactionSource, TransactionStatusStreamFor, TxHash,
@@ -43,7 +43,6 @@ use crate::{
 };
 
 pub struct ConstructionBenchmarkDescription {
-	pub profile: Profile,
 	pub key_types: KeyTypes,
 	pub block_type: BlockType,
 	pub size: SizeType,
@@ -51,7 +50,6 @@ pub struct ConstructionBenchmarkDescription {
 }
 
 pub struct ConstructionBenchmark {
-	profile: Profile,
 	database: BenchDb,
 	transactions: Transactions,
 }
@@ -60,11 +58,6 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
 	fn path(&self) -> Path {
 		let mut path = Path::new(&["node", "proposer"]);
 
-		match self.profile {
-			Profile::Wasm => path.push("wasm"),
-			Profile::Native => path.push("native"),
-		}
-
 		match self.key_types {
 			KeyTypes::Sr25519 => path.push("sr25519"),
 			KeyTypes::Ed25519 => path.push("ed25519"),
@@ -99,7 +92,6 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
 		}
 
 		Box::new(ConstructionBenchmark {
-			profile: self.profile,
 			database: bench_db,
 			transactions: Transactions(extrinsics),
 		})
@@ -107,8 +99,8 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
 
 	fn name(&self) -> Cow<'static, str> {
 		format!(
-			"Block construction ({:?}/{}, {:?}, {:?} backend)",
-			self.block_type, self.size, self.profile, self.database_type,
+			"Block construction ({:?}/{}, {:?} backend)",
+			self.block_type, self.size, self.database_type,
 		)
 		.into()
 	}
@@ -116,7 +108,7 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
 
 impl core::Benchmark for ConstructionBenchmark {
 	fn run(&mut self, mode: Mode) -> std::time::Duration {
-		let context = self.database.create_context(self.profile);
+		let context = self.database.create_context();
 
 		let _ = context
 			.client
diff --git a/substrate/bin/node/bench/src/import.rs b/substrate/bin/node/bench/src/import.rs
index 167377ea9a2..78b280076e0 100644
--- a/substrate/bin/node/bench/src/import.rs
+++ b/substrate/bin/node/bench/src/import.rs
@@ -33,7 +33,7 @@
 use std::borrow::Cow;
 
 use node_primitives::Block;
-use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile};
+use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
 use sc_client_api::backend::Backend;
 use sp_state_machine::InspectState;
 
@@ -43,7 +43,6 @@ use crate::{
 };
 
 pub struct ImportBenchmarkDescription {
-	pub profile: Profile,
 	pub key_types: KeyTypes,
 	pub block_type: BlockType,
 	pub size: SizeType,
@@ -51,7 +50,6 @@ pub struct ImportBenchmarkDescription {
 }
 
 pub struct ImportBenchmark {
-	profile: Profile,
 	database: BenchDb,
 	block: Block,
 	block_type: BlockType,
@@ -61,11 +59,6 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription {
 	fn path(&self) -> Path {
 		let mut path = Path::new(&["node", "import"]);
 
-		match self.profile {
-			Profile::Wasm => path.push("wasm"),
-			Profile::Native => path.push("native"),
-		}
-
 		match self.key_types {
 			KeyTypes::Sr25519 => path.push("sr25519"),
 			KeyTypes::Ed25519 => path.push("ed25519"),
@@ -88,21 +81,15 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription {
 	}
 
 	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
-		let profile = self.profile;
 		let mut bench_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types);
 		let block = bench_db.generate_block(self.block_type.to_content(self.size.transactions()));
-		Box::new(ImportBenchmark {
-			database: bench_db,
-			block_type: self.block_type,
-			block,
-			profile,
-		})
+		Box::new(ImportBenchmark { database: bench_db, block_type: self.block_type, block })
 	}
 
 	fn name(&self) -> Cow<'static, str> {
 		format!(
-			"Block import ({:?}/{}, {:?}, {:?} backend)",
-			self.block_type, self.size, self.profile, self.database_type,
+			"Block import ({:?}/{}, {:?} backend)",
+			self.block_type, self.size, self.database_type,
 		)
 		.into()
 	}
@@ -110,7 +97,7 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription {
 
 impl core::Benchmark for ImportBenchmark {
 	fn run(&mut self, mode: Mode) -> std::time::Duration {
-		let mut context = self.database.create_context(self.profile);
+		let mut context = self.database.create_context();
 
 		let _ = context
 			.client
diff --git a/substrate/bin/node/bench/src/main.rs b/substrate/bin/node/bench/src/main.rs
index 051d8ddb9bf..1f69c976958 100644
--- a/substrate/bin/node/bench/src/main.rs
+++ b/substrate/bin/node/bench/src/main.rs
@@ -30,7 +30,7 @@ mod txpool;
 
 use clap::Parser;
 
-use node_testing::bench::{BlockType, DatabaseType as BenchDataBaseType, KeyTypes, Profile};
+use node_testing::bench::{BlockType, DatabaseType as BenchDataBaseType, KeyTypes};
 
 use crate::{
 	common::SizeType,
@@ -85,31 +85,28 @@ fn main() {
 
 	let mut import_benchmarks = Vec::new();
 
-	for profile in [Profile::Wasm, Profile::Native] {
-		for size in [
-			SizeType::Empty,
-			SizeType::Small,
-			SizeType::Medium,
-			SizeType::Large,
-			SizeType::Full,
-			SizeType::Custom(opt.transactions.unwrap_or(0)),
+	for size in [
+		SizeType::Empty,
+		SizeType::Small,
+		SizeType::Medium,
+		SizeType::Large,
+		SizeType::Full,
+		SizeType::Custom(opt.transactions.unwrap_or(0)),
+	] {
+		for block_type in [
+			BlockType::RandomTransfersKeepAlive,
+			BlockType::RandomTransfersReaping,
+			BlockType::Noop,
 		] {
-			for block_type in [
-				BlockType::RandomTransfersKeepAlive,
-				BlockType::RandomTransfersReaping,
-				BlockType::Noop,
-			] {
-				for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb] {
-					import_benchmarks.push((profile, size, block_type, database_type));
-				}
+			for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb] {
+				import_benchmarks.push((size, block_type, database_type));
 			}
 		}
 	}
 
 	let benchmarks = matrix!(
-		(profile, size, block_type, database_type) in import_benchmarks.into_iter() =>
+		(size, block_type, database_type) in import_benchmarks.into_iter() =>
 			ImportBenchmarkDescription {
-				profile,
 				key_types: KeyTypes::Sr25519,
 				size,
 				block_type,
@@ -138,14 +135,12 @@ fn main() {
 			.iter().map(move |db_type| (size, db_type)))
 			=> TrieWriteBenchmarkDescription { database_size: *size, database_type: *db_type },
 		ConstructionBenchmarkDescription {
-			profile: Profile::Wasm,
 			key_types: KeyTypes::Sr25519,
 			block_type: BlockType::RandomTransfersKeepAlive,
 			size: SizeType::Medium,
 			database_type: BenchDataBaseType::RocksDb,
 		},
 		ConstructionBenchmarkDescription {
-			profile: Profile::Wasm,
 			key_types: KeyTypes::Sr25519,
 			block_type: BlockType::RandomTransfersKeepAlive,
 			size: SizeType::Large,
diff --git a/substrate/bin/node/bench/src/txpool.rs b/substrate/bin/node/bench/src/txpool.rs
index 4e8e5c0d9a4..a3524ac5bc8 100644
--- a/substrate/bin/node/bench/src/txpool.rs
+++ b/substrate/bin/node/bench/src/txpool.rs
@@ -23,7 +23,7 @@
 
 use std::borrow::Cow;
 
-use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile};
+use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
 
 use sc_transaction_pool::BasicPool;
 use sc_transaction_pool_api::{TransactionPool, TransactionSource};
@@ -57,7 +57,7 @@ impl core::BenchmarkDescription for PoolBenchmarkDescription {
 
 impl core::Benchmark for PoolBenchmark {
 	fn run(&mut self, mode: Mode) -> std::time::Duration {
-		let context = self.database.create_context(Profile::Wasm);
+		let context = self.database.create_context();
 
 		let _ = context
 			.client
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml
index d9b3ebee995..032ba271f3c 100644
--- a/substrate/bin/node/cli/Cargo.toml
+++ b/substrate/bin/node/cli/Cargo.toml
@@ -83,6 +83,7 @@ sc-authority-discovery = { version = "0.10.0-dev", path = "../../../client/autho
 sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state-rpc" }
 sc-sysinfo = { version = "6.0.0-dev", path = "../../../client/sysinfo" }
 sc-storage-monitor = { version = "0.1.0", path = "../../../client/storage-monitor" }
+sc-offchain = { version = "4.0.0-dev", path = "../../../client/offchain" }
 
 # frame dependencies
 frame-system = { version = "4.0.0-dev", path = "../../../frame/system" }
diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs
index 527b145c62c..5ee538d18d6 100644
--- a/substrate/bin/node/cli/benches/block_production.rs
+++ b/substrate/bin/node/cli/benches/block_production.rs
@@ -21,7 +21,6 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughpu
 use kitchensink_runtime::{constants::currency::*, BalancesCall};
 use node_cli::service::{create_extrinsic, FullClient};
 use sc_block_builder::{BlockBuilderProvider, BuiltBlock, RecordProof};
-use sc_client_api::execution_extensions::ExecutionStrategies;
 use sc_consensus::{
 	block_import::{BlockImportParams, ForkChoiceStrategy},
 	BlockImport, StateAction,
@@ -56,9 +55,6 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 
 	let spec = Box::new(node_cli::chain_spec::development_config());
 
-	// NOTE: We enforce the use of the WASM runtime to benchmark block production using WASM.
-	let execution_strategy = sc_client_api::ExecutionStrategy::AlwaysWasm;
-
 	let config = Configuration {
 		impl_name: "BenchmarkImpl".into(),
 		impl_version: "1.0".into(),
@@ -77,13 +73,6 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 		wasm_method: WasmExecutionMethod::Compiled {
 			instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
 		},
-		execution_strategies: ExecutionStrategies {
-			syncing: execution_strategy,
-			importing: execution_strategy,
-			block_construction: execution_strategy,
-			offchain_worker: execution_strategy,
-			other: execution_strategy,
-		},
 		rpc_addr: None,
 		rpc_max_connections: Default::default(),
 		rpc_cors: None,
diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs
index 44ebe1e7d4f..d3e8c02a958 100644
--- a/substrate/bin/node/cli/benches/transaction_pool.rs
+++ b/substrate/bin/node/cli/benches/transaction_pool.rs
@@ -23,7 +23,6 @@ use futures::{future, StreamExt};
 use kitchensink_runtime::{constants::currency::*, BalancesCall, SudoCall};
 use node_cli::service::{create_extrinsic, fetch_nonce, FullClient, TransactionPool};
 use node_primitives::AccountId;
-use sc_client_api::execution_extensions::ExecutionStrategies;
 use sc_service::{
 	config::{
 		BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig,
@@ -70,14 +69,6 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
 		blocks_pruning: BlocksPruning::KeepAll,
 		chain_spec: spec,
 		wasm_method: Default::default(),
-		// NOTE: we enforce the use of the native runtime to make the errors more debuggable
-		execution_strategies: ExecutionStrategies {
-			syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible,
-			importing: sc_client_api::ExecutionStrategy::NativeWhenPossible,
-			block_construction: sc_client_api::ExecutionStrategy::NativeWhenPossible,
-			offchain_worker: sc_client_api::ExecutionStrategy::NativeWhenPossible,
-			other: sc_client_api::ExecutionStrategy::NativeWhenPossible,
-		},
 		rpc_addr: None,
 		rpc_max_connections: Default::default(),
 		rpc_cors: None,
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index 81f45753844..487c6c48f00 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -28,7 +28,7 @@ use futures::prelude::*;
 use kitchensink_runtime::RuntimeApi;
 use node_executor::ExecutorDispatch;
 use node_primitives::Block;
-use sc_client_api::BlockBackend;
+use sc_client_api::{Backend, BlockBackend};
 use sc_consensus_babe::{self, SlotProportion};
 use sc_executor::NativeElseWasmExecutor;
 use sc_network::{event::Event, NetworkEventStream, NetworkService};
@@ -37,6 +37,7 @@ use sc_network_sync::SyncingService;
 use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager};
 use sc_statement_store::Store as StatementStore;
 use sc_telemetry::{Telemetry, TelemetryWorker};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use sp_api::ProvideRuntimeApi;
 use sp_core::crypto::Pair;
 use sp_runtime::{generic, traits::Block as BlockT, SaturatedConversion};
@@ -205,27 +206,29 @@ pub fn new_partial(
 	)?;
 
 	let slot_duration = babe_link.config().slot_duration();
-	let (import_queue, babe_worker_handle) = sc_consensus_babe::import_queue(
-		babe_link.clone(),
-		block_import.clone(),
-		Some(Box::new(justification_import)),
-		client.clone(),
-		select_chain.clone(),
-		move |_, ()| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+	let (import_queue, babe_worker_handle) =
+		sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams {
+			link: babe_link.clone(),
+			block_import: block_import.clone(),
+			justification_import: Some(Box::new(justification_import)),
+			client: client.clone(),
+			select_chain: select_chain.clone(),
+			create_inherent_data_providers: move |_, ()| async move {
+				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
 
-			let slot =
+				let slot =
 				sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
 					*timestamp,
 					slot_duration,
 				);
 
-			Ok((slot, timestamp))
-		},
-		&task_manager.spawn_essential_handle(),
-		config.prometheus_registry(),
-		telemetry.as_ref().map(|x| x.handle()),
-	)?;
+				Ok((slot, timestamp))
+			},
+			spawner: &task_manager.spawn_essential_handle(),
+			registry: config.prometheus_registry(),
+			telemetry: telemetry.as_ref().map(|x| x.handle()),
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
+		})?;
 
 	let import_setup = (block_import, grandpa_link, babe_link);
 
@@ -278,9 +281,10 @@ pub fn new_partial(
 					finality_provider: finality_proof_provider.clone(),
 				},
 				statement_store: rpc_statement_store.clone(),
+				backend: rpc_backend.clone(),
 			};
 
-			node_rpc::create_full(deps, rpc_backend.clone()).map_err(Into::into)
+			node_rpc::create_full(deps).map_err(Into::into)
 		};
 
 		(rpc_extensions_builder, shared_voter_state2)
@@ -381,15 +385,6 @@ pub fn new_full_base(
 			warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
 		})?;
 
-	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(
-			&config,
-			task_manager.spawn_handle(),
-			client.clone(),
-			network.clone(),
-		);
-	}
-
 	let role = config.role.clone();
 	let force_authoring = config.force_authoring;
 	let backoff_authoring_blocks =
@@ -397,10 +392,11 @@ pub fn new_full_base(
 	let name = config.network.node_name.clone();
 	let enable_grandpa = !config.disable_grandpa;
 	let prometheus_registry = config.prometheus_registry().cloned();
+	let enable_offchain_worker = config.offchain_worker.enabled;
 
 	let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
 		config,
-		backend,
+		backend: backend.clone(),
 		client: client.clone(),
 		keystore: keystore_container.keystore(),
 		network: network.clone(),
@@ -525,14 +521,14 @@ pub fn new_full_base(
 	// need a keystore, regardless of which protocol we use below.
 	let keystore = if role.is_authority() { Some(keystore_container.keystore()) } else { None };
 
-	let config = grandpa::Config {
+	let grandpa_config = grandpa::Config {
 		// FIXME #1578 make this available through chainspec
 		gossip_duration: std::time::Duration::from_millis(333),
 		justification_period: 512,
 		name: Some(name),
 		observer_enabled: false,
 		keystore,
-		local_role: role,
+		local_role: role.clone(),
 		telemetry: telemetry.as_ref().map(|x| x.handle()),
 		protocol_name: grandpa_protocol_name,
 	};
@@ -545,7 +541,7 @@ pub fn new_full_base(
 		// been tested extensively yet and having most nodes in a network run it
 		// could lead to finality stalls.
 		let grandpa_config = grandpa::GrandpaParams {
-			config,
+			config: grandpa_config,
 			link: grandpa_link,
 			network: network.clone(),
 			sync: Arc::new(sync_service.clone()),
@@ -553,6 +549,7 @@ pub fn new_full_base(
 			voting_rule: grandpa::VotingRulesBuilder::default().build(),
 			prometheus_registry: prometheus_registry.clone(),
 			shared_voter_state,
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
 		};
 
 		// the GRANDPA voter task is considered infallible, i.e.
@@ -584,6 +581,29 @@ pub fn new_full_base(
 		statement_handler.run(),
 	);
 
+	if enable_offchain_worker {
+		task_manager.spawn_handle().spawn(
+			"offchain-workers-runner",
+			"offchain-work",
+			sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
+				runtime_api_provider: client.clone(),
+				keystore: Some(keystore_container.keystore()),
+				offchain_db: backend.offchain_storage(),
+				transaction_pool: Some(OffchainTransactionPoolFactory::new(
+					transaction_pool.clone(),
+				)),
+				network_provider: network.clone(),
+				is_validator: role.is_authority(),
+				enable_http_requests: true,
+				custom_extensions: move |_| {
+					vec![Box::new(statement_store.clone().as_statement_store_ext()) as Box<_>]
+				},
+			})
+			.run(client.clone(), task_manager.spawn_handle())
+			.boxed(),
+		);
+	}
+
 	network_starter.start_network();
 	Ok(NewFullBase {
 		task_manager,
diff --git a/substrate/bin/node/cli/tests/benchmark_block_works.rs b/substrate/bin/node/cli/tests/benchmark_block_works.rs
index 50103a66a4d..09c2f262e2c 100644
--- a/substrate/bin/node/cli/tests/benchmark_block_works.rs
+++ b/substrate/bin/node/cli/tests/benchmark_block_works.rs
@@ -39,7 +39,7 @@ async fn benchmark_block_works() {
 		.arg(base_dir.path())
 		.args(["--from", "1", "--to", "1"])
 		.args(["--repeat", "1"])
-		.args(["--execution", "wasm", "--wasm-execution", "compiled"])
+		.args(["--wasm-execution", "compiled"])
 		.status()
 		.unwrap();
 
diff --git a/substrate/bin/node/rpc/src/lib.rs b/substrate/bin/node/rpc/src/lib.rs
index 5ab96bf1c70..40c4741dbc1 100644
--- a/substrate/bin/node/rpc/src/lib.rs
+++ b/substrate/bin/node/rpc/src/lib.rs
@@ -90,12 +90,23 @@ pub struct FullDeps<C, P, SC, B> {
 	pub grandpa: GrandpaDeps<B>,
 	/// Shared statement store reference.
 	pub statement_store: Arc<dyn sp_statement_store::StatementStore>,
+	/// The backend used by the node.
+	pub backend: Arc<B>,
 }
 
 /// Instantiate all Full RPC extensions.
 pub fn create_full<C, P, SC, B>(
-	deps: FullDeps<C, P, SC, B>,
-	backend: Arc<B>,
+	FullDeps {
+		client,
+		pool,
+		select_chain,
+		chain_spec,
+		deny_unsafe,
+		babe,
+		grandpa,
+		statement_store,
+		backend,
+	}: FullDeps<C, P, SC, B>,
 ) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
 where
 	C: ProvideRuntimeApi<Block>
@@ -130,16 +141,6 @@ where
 	use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer};
 
 	let mut io = RpcModule::new(());
-	let FullDeps {
-		client,
-		pool,
-		select_chain,
-		chain_spec,
-		deny_unsafe,
-		babe,
-		grandpa,
-		statement_store,
-	} = deps;
 
 	let BabeDeps { keystore, babe_worker_handle } = babe;
 	let GrandpaDeps {
@@ -159,7 +160,15 @@ where
 	// Making synchronous calls in light client freezes the browser currently,
 	// more context: https://github.com/paritytech/substrate/pull/3480
 	// These RPCs should use an asynchronous caller instead.
-	io.merge(Mmr::new(client.clone()).into_rpc())?;
+	io.merge(
+		Mmr::new(
+			client.clone(),
+			backend
+				.offchain_storage()
+				.ok_or_else(|| "Backend doesn't provide an offchain storage")?,
+		)
+		.into_rpc(),
+	)?;
 	io.merge(TransactionPayment::new(client.clone()).into_rpc())?;
 	io.merge(
 		Babe::new(client.clone(), babe_worker_handle.clone(), keystore, select_chain, deny_unsafe)
diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs
index 9708b2c3432..f1ab2212239 100644
--- a/substrate/bin/node/testing/src/bench.rs
+++ b/substrate/bin/node/testing/src/bench.rs
@@ -40,17 +40,14 @@ use kitchensink_runtime::{
 };
 use node_primitives::Block;
 use sc_block_builder::BlockBuilderProvider;
-use sc_client_api::{
-	execution_extensions::{ExecutionExtensions, ExecutionStrategies},
-	ExecutionStrategy,
-};
+use sc_client_api::execution_extensions::ExecutionExtensions;
 use sc_client_db::PruningMode;
 use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux};
 use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod, WasmtimeInstantiationStrategy};
 use sp_api::ProvideRuntimeApi;
 use sp_block_builder::BlockBuilder;
 use sp_consensus::BlockOrigin;
-use sp_core::{blake2_256, ed25519, sr25519, traits::SpawnNamed, ExecutionContext, Pair, Public};
+use sp_core::{blake2_256, ed25519, sr25519, traits::SpawnNamed, Pair, Public};
 use sp_inherents::InherentData;
 use sp_runtime::{
 	traits::{Block as BlockT, IdentifyAccount, Verify},
@@ -354,7 +351,7 @@ impl BenchDb {
 			dir.path().to_string_lossy(),
 		);
 		let (_client, _backend, _task_executor) =
-			Self::bench_client(database_type, dir.path(), Profile::Native, &keyring);
+			Self::bench_client(database_type, dir.path(), &keyring);
 		let directory_guard = Guard(dir);
 
 		BenchDb { keyring, directory_guard, database_type }
@@ -380,7 +377,6 @@ impl BenchDb {
 	fn bench_client(
 		database_type: DatabaseType,
 		dir: &std::path::Path,
-		profile: Profile,
 		keyring: &BenchKeyring,
 	) -> (Client, std::sync::Arc<Backend>, TaskExecutor) {
 		let db_config = sc_client_db::DatabaseSettings {
@@ -415,12 +411,7 @@ impl BenchDb {
 			genesis_block_builder,
 			None,
 			None,
-			ExecutionExtensions::new(
-				profile.into_execution_strategies(),
-				None,
-				None,
-				Arc::new(executor),
-			),
+			ExecutionExtensions::new(None, Arc::new(executor)),
 			Box::new(task_executor.clone()),
 			None,
 			None,
@@ -444,11 +435,7 @@ impl BenchDb {
 
 		client
 			.runtime_api()
-			.inherent_extrinsics_with_context(
-				client.chain_info().genesis_hash,
-				ExecutionContext::BlockConstruction,
-				inherent_data,
-			)
+			.inherent_extrinsics(client.chain_info().genesis_hash, inherent_data)
 			.expect("Get inherents failed")
 	}
 
@@ -459,12 +446,8 @@ impl BenchDb {
 
 	/// Get cliet for this database operations.
 	pub fn client(&mut self) -> Client {
-		let (client, _backend, _task_executor) = Self::bench_client(
-			self.database_type,
-			self.directory_guard.path(),
-			Profile::Wasm,
-			&self.keyring,
-		);
+		let (client, _backend, _task_executor) =
+			Self::bench_client(self.database_type, self.directory_guard.path(), &self.keyring);
 
 		client
 	}
@@ -507,10 +490,10 @@ impl BenchDb {
 	}
 
 	/// Clone this database and create context for testing/benchmarking.
-	pub fn create_context(&self, profile: Profile) -> BenchContext {
+	pub fn create_context(&self) -> BenchContext {
 		let BenchDb { directory_guard, keyring, database_type } = self.clone();
 		let (client, backend, task_executor) =
-			Self::bench_client(database_type, directory_guard.path(), profile, &keyring);
+			Self::bench_client(database_type, directory_guard.path(), &keyring);
 
 		BenchContext {
 			client: Arc::new(client),
@@ -611,36 +594,6 @@ impl BenchKeyring {
 	}
 }
 
-/// Profile for exetion strategies.
-#[derive(Clone, Copy, Debug)]
-pub enum Profile {
-	/// As native as possible.
-	Native,
-	/// As wasm as possible.
-	Wasm,
-}
-
-impl Profile {
-	fn into_execution_strategies(self) -> ExecutionStrategies {
-		match self {
-			Profile::Wasm => ExecutionStrategies {
-				syncing: ExecutionStrategy::AlwaysWasm,
-				importing: ExecutionStrategy::AlwaysWasm,
-				block_construction: ExecutionStrategy::AlwaysWasm,
-				offchain_worker: ExecutionStrategy::AlwaysWasm,
-				other: ExecutionStrategy::AlwaysWasm,
-			},
-			Profile::Native => ExecutionStrategies {
-				syncing: ExecutionStrategy::NativeElseWasm,
-				importing: ExecutionStrategy::NativeElseWasm,
-				block_construction: ExecutionStrategy::NativeElseWasm,
-				offchain_worker: ExecutionStrategy::NativeElseWasm,
-				other: ExecutionStrategy::NativeElseWasm,
-			},
-		}
-	}
-}
-
 struct Guard(tempfile::TempDir);
 
 impl Guard {
diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml
index be47c2931ab..17f9747c39b 100644
--- a/substrate/client/api/Cargo.toml
+++ b/substrate/client/api/Cargo.toml
@@ -31,7 +31,6 @@ sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/comm
 sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" }
 sp-database = { version = "4.0.0-dev", path = "../../primitives/database" }
 sp-externalities = { version = "0.19.0", path = "../../primitives/externalities" }
-sp-keystore = { version = "0.27.0", default-features = false, path = "../../primitives/keystore" }
 sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" }
 sp-state-machine = { version = "0.28.0", path = "../../primitives/state-machine" }
 sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" }
diff --git a/substrate/client/api/src/call_executor.rs b/substrate/client/api/src/call_executor.rs
index db8e4d8495a..cecc1053d1b 100644
--- a/substrate/client/api/src/call_executor.rs
+++ b/substrate/client/api/src/call_executor.rs
@@ -20,12 +20,13 @@
 
 use sc_executor::{RuntimeVersion, RuntimeVersionOf};
 use sp_core::traits::CallContext;
+use sp_externalities::Extensions;
 use sp_runtime::traits::Block as BlockT;
-use sp_state_machine::{ExecutionStrategy, OverlayedChanges, StorageProof};
+use sp_state_machine::{OverlayedChanges, StorageProof};
 use std::cell::RefCell;
 
 use crate::execution_extensions::ExecutionExtensions;
-use sp_api::{ExecutionContext, ProofRecorder, StorageTransactionCache};
+use sp_api::{ProofRecorder, StorageTransactionCache};
 
 /// Executor Provider
 pub trait ExecutorProvider<Block: BlockT> {
@@ -58,7 +59,6 @@ pub trait CallExecutor<B: BlockT>: RuntimeVersionOf {
 		at_hash: B::Hash,
 		method: &str,
 		call_data: &[u8],
-		strategy: ExecutionStrategy,
 		context: CallContext,
 	) -> Result<Vec<u8>, sp_blockchain::Error>;
 
@@ -79,7 +79,8 @@ pub trait CallExecutor<B: BlockT>: RuntimeVersionOf {
 			>,
 		>,
 		proof_recorder: &Option<ProofRecorder<B>>,
-		context: ExecutionContext,
+		call_context: CallContext,
+		extensions: &RefCell<Extensions>,
 	) -> sp_blockchain::Result<Vec<u8>>;
 
 	/// Extract RuntimeVersion of given block
diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs
index 20dc34d19b1..6f927105df0 100644
--- a/substrate/client/api/src/execution_extensions.rs
+++ b/substrate/client/api/src/execution_extensions.rs
@@ -23,48 +23,10 @@
 //! extensions to support APIs for particular execution context & capabilities.
 
 use parking_lot::RwLock;
-use sc_transaction_pool_api::OffchainTransactionPoolFactory;
-use sp_core::{
-	offchain::{self, OffchainDbExt, OffchainWorkerExt},
-	traits::{ReadRuntimeVersion, ReadRuntimeVersionExt},
-	ExecutionContext,
-};
+use sp_core::traits::{ReadRuntimeVersion, ReadRuntimeVersionExt};
 use sp_externalities::{Extension, Extensions};
-use sp_keystore::{KeystoreExt, KeystorePtr};
 use sp_runtime::traits::{Block as BlockT, NumberFor};
-pub use sp_state_machine::ExecutionStrategy;
-use sp_state_machine::{DefaultHandler, ExecutionManager};
-use std::{
-	marker::PhantomData,
-	sync::{Arc, Weak},
-};
-
-/// Execution strategies settings.
-#[derive(Debug, Clone)]
-pub struct ExecutionStrategies {
-	/// Execution strategy used when syncing.
-	pub syncing: ExecutionStrategy,
-	/// Execution strategy used when importing blocks.
-	pub importing: ExecutionStrategy,
-	/// Execution strategy used when constructing blocks.
-	pub block_construction: ExecutionStrategy,
-	/// Execution strategy used for offchain workers.
-	pub offchain_worker: ExecutionStrategy,
-	/// Execution strategy used in other cases.
-	pub other: ExecutionStrategy,
-}
-
-impl Default for ExecutionStrategies {
-	fn default() -> ExecutionStrategies {
-		ExecutionStrategies {
-			syncing: ExecutionStrategy::NativeElseWasm,
-			importing: ExecutionStrategy::NativeElseWasm,
-			block_construction: ExecutionStrategy::AlwaysWasm,
-			offchain_worker: ExecutionStrategy::NativeWhenPossible,
-			other: ExecutionStrategy::NativeElseWasm,
-		}
-	}
-}
+use std::{marker::PhantomData, sync::Arc};
 
 /// Generate the starting set of [`Extensions`].
 ///
@@ -74,22 +36,12 @@ pub trait ExtensionsFactory<Block: BlockT>: Send + Sync {
 	///
 	/// - `block_hash`: The hash of the block in the context that extensions will be used.
 	/// - `block_number`: The number of the block in the context that extensions will be used.
-	/// - `capabilities`: The capabilities
-	fn extensions_for(
-		&self,
-		block_hash: Block::Hash,
-		block_number: NumberFor<Block>,
-		capabilities: offchain::Capabilities,
-	) -> Extensions;
+	fn extensions_for(&self, block_hash: Block::Hash, block_number: NumberFor<Block>)
+		-> Extensions;
 }
 
 impl<Block: BlockT> ExtensionsFactory<Block> for () {
-	fn extensions_for(
-		&self,
-		_: Block::Hash,
-		_: NumberFor<Block>,
-		_capabilities: offchain::Capabilities,
-	) -> Extensions {
+	fn extensions_for(&self, _: Block::Hash, _: NumberFor<Block>) -> Extensions {
 		Extensions::new()
 	}
 }
@@ -99,10 +51,9 @@ impl<Block: BlockT, T: ExtensionsFactory<Block>> ExtensionsFactory<Block> for Ve
 		&self,
 		block_hash: Block::Hash,
 		block_number: NumberFor<Block>,
-		capabilities: offchain::Capabilities,
 	) -> Extensions {
 		let mut exts = Extensions::new();
-		exts.extend(self.iter().map(|e| e.extensions_for(block_hash, block_number, capabilities)));
+		exts.extend(self.iter().map(|e| e.extensions_for(block_hash, block_number)));
 		exts
 	}
 }
@@ -125,12 +76,7 @@ impl<Block: BlockT, Ext> ExtensionBeforeBlock<Block, Ext> {
 impl<Block: BlockT, Ext: Default + Extension> ExtensionsFactory<Block>
 	for ExtensionBeforeBlock<Block, Ext>
 {
-	fn extensions_for(
-		&self,
-		_: Block::Hash,
-		block_number: NumberFor<Block>,
-		_: offchain::Capabilities,
-	) -> Extensions {
+	fn extensions_for(&self, _: Block::Hash, block_number: NumberFor<Block>) -> Extensions {
 		let mut exts = Extensions::new();
 
 		if block_number < self.before {
@@ -141,154 +87,47 @@ impl<Block: BlockT, Ext: Default + Extension> ExtensionsFactory<Block>
 	}
 }
 
-/// Create a Offchain DB accessor object.
-pub trait DbExternalitiesFactory: Send + Sync {
-	/// Create [`offchain::DbExternalities`] instance.
-	fn create(&self) -> Box<dyn offchain::DbExternalities>;
-}
-
-impl<T: offchain::DbExternalities + Clone + Sync + Send + 'static> DbExternalitiesFactory for T {
-	fn create(&self) -> Box<dyn offchain::DbExternalities> {
-		Box::new(self.clone())
-	}
-}
-
 /// A producer of execution extensions for offchain calls.
 ///
 /// This crate aggregates extensions available for the offchain calls
 /// and is responsible for producing a correct `Extensions` object.
 /// for each call, based on required `Capabilities`.
 pub struct ExecutionExtensions<Block: BlockT> {
-	strategies: ExecutionStrategies,
-	keystore: Option<KeystorePtr>,
-	offchain_db: Option<Box<dyn DbExternalitiesFactory>>,
-	// FIXME: these three are only RwLock because of https://github.com/paritytech/substrate/issues/4587
-	//        remove when fixed.
-	transaction_pool_factory: RwLock<Option<OffchainTransactionPoolFactory<Block>>>,
 	extensions_factory: RwLock<Box<dyn ExtensionsFactory<Block>>>,
-	statement_store: RwLock<Option<Weak<dyn sp_statement_store::StatementStore>>>,
 	read_runtime_version: Arc<dyn ReadRuntimeVersion>,
 }
 
 impl<Block: BlockT> ExecutionExtensions<Block> {
-	/// Create new `ExecutionExtensions` given a `keystore` and `ExecutionStrategies`.
+	/// Create new `ExecutionExtensions` given an `extensions_factory`.
 	pub fn new(
-		strategies: ExecutionStrategies,
-		keystore: Option<KeystorePtr>,
-		offchain_db: Option<Box<dyn DbExternalitiesFactory>>,
+		extensions_factory: Option<Box<dyn ExtensionsFactory<Block>>>,
 		read_runtime_version: Arc<dyn ReadRuntimeVersion>,
 	) -> Self {
-		let transaction_pool = RwLock::new(None);
-		let statement_store = RwLock::new(None);
-		let extensions_factory = Box::new(());
 		Self {
-			strategies,
-			keystore,
-			offchain_db,
-			extensions_factory: RwLock::new(extensions_factory),
-			transaction_pool_factory: transaction_pool,
-			statement_store,
+			extensions_factory: extensions_factory
+				.map(RwLock::new)
+				.unwrap_or_else(|| RwLock::new(Box::new(()))),
 			read_runtime_version,
 		}
 	}
 
-	/// Get a reference to the execution strategies.
-	pub fn strategies(&self) -> &ExecutionStrategies {
-		&self.strategies
-	}
-
 	/// Set the new extensions_factory
 	pub fn set_extensions_factory(&self, maker: impl ExtensionsFactory<Block> + 'static) {
 		*self.extensions_factory.write() = Box::new(maker);
 	}
 
-	/// Register transaction pool extension.
-	pub fn register_transaction_pool_factory(
-		&self,
-		factory: OffchainTransactionPoolFactory<Block>,
-	) {
-		*self.transaction_pool_factory.write() = Some(factory);
-	}
-
-	/// Register statement store extension.
-	pub fn register_statement_store(&self, store: Arc<dyn sp_statement_store::StatementStore>) {
-		*self.statement_store.write() = Some(Arc::downgrade(&store) as _);
-	}
-
 	/// Based on the execution context and capabilities it produces
 	/// the extensions object to support desired set of APIs.
 	pub fn extensions(
 		&self,
 		block_hash: Block::Hash,
 		block_number: NumberFor<Block>,
-		context: ExecutionContext,
 	) -> Extensions {
-		let capabilities = context.capabilities();
-
 		let mut extensions =
-			self.extensions_factory
-				.read()
-				.extensions_for(block_hash, block_number, capabilities);
-
-		if capabilities.contains(offchain::Capabilities::KEYSTORE) {
-			if let Some(ref keystore) = self.keystore {
-				extensions.register(KeystoreExt(keystore.clone()));
-			}
-		}
-
-		if capabilities.contains(offchain::Capabilities::TRANSACTION_POOL) {
-			if let Some(pool) = self.transaction_pool_factory.read().as_ref() {
-				extensions.register(pool.offchain_transaction_pool(block_hash));
-			}
-		}
-
-		if capabilities.contains(offchain::Capabilities::STATEMENT_STORE) {
-			if let Some(store) = self.statement_store.read().as_ref().and_then(|x| x.upgrade()) {
-				extensions.register(sp_statement_store::runtime_api::StatementStoreExt(store));
-			}
-		}
-		if capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_READ) ||
-			capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_WRITE)
-		{
-			if let Some(offchain_db) = self.offchain_db.as_ref() {
-				extensions.register(OffchainDbExt::new(offchain::LimitedExternalities::new(
-					capabilities,
-					offchain_db.create(),
-				)));
-			}
-		}
-
-		if let ExecutionContext::OffchainCall(Some(ext)) = context {
-			extensions.register(OffchainWorkerExt::new(offchain::LimitedExternalities::new(
-				capabilities,
-				ext.0,
-			)));
-		}
+			self.extensions_factory.read().extensions_for(block_hash, block_number);
 
 		extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone()));
 
 		extensions
 	}
-
-	/// Create `ExecutionManager` and `Extensions` for given offchain call.
-	///
-	/// Based on the execution context and capabilities it produces
-	/// the right manager and extensions object to support desired set of APIs.
-	pub fn manager_and_extensions<E: std::fmt::Debug>(
-		&self,
-		block_hash: Block::Hash,
-		block_number: NumberFor<Block>,
-		context: ExecutionContext,
-	) -> (ExecutionManager<DefaultHandler<E>>, Extensions) {
-		let manager = match context {
-			ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(),
-			ExecutionContext::Syncing => self.strategies.syncing.get_manager(),
-			ExecutionContext::Importing => self.strategies.importing.get_manager(),
-			ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.is_all() =>
-				self.strategies.offchain_worker.get_manager(),
-			ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(),
-		};
-
-		(manager, self.extensions(block_hash, block_number, context))
-	}
 }
diff --git a/substrate/client/api/src/lib.rs b/substrate/client/api/src/lib.rs
index 0faddc10fe0..faadf3663a5 100644
--- a/substrate/client/api/src/lib.rs
+++ b/substrate/client/api/src/lib.rs
@@ -36,7 +36,7 @@ pub use proof_provider::*;
 pub use sp_blockchain as blockchain;
 pub use sp_blockchain::HeaderBackend;
 
-pub use sp_state_machine::{CompactProof, ExecutionStrategy, StorageProof};
+pub use sp_state_machine::{CompactProof, StorageProof};
 pub use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey};
 
 /// Usage Information Provider interface
diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs
index f055d468882..7c57c77f3da 100644
--- a/substrate/client/block-builder/src/lib.rs
+++ b/substrate/client/block-builder/src/lib.rs
@@ -32,7 +32,7 @@ use sp_api::{
 	ApiExt, ApiRef, Core, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome,
 };
 use sp_blockchain::{ApplyExtrinsicFailed, Error};
-use sp_core::ExecutionContext;
+use sp_core::traits::CallContext;
 use sp_runtime::{
 	legacy,
 	traits::{Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One},
@@ -178,11 +178,9 @@ where
 			api.record_proof();
 		}
 
-		api.initialize_block_with_context(
-			parent_hash,
-			ExecutionContext::BlockConstruction,
-			&header,
-		)?;
+		api.set_call_context(CallContext::Onchain);
+
+		api.initialize_block(parent_hash, &header)?;
 
 		let version = api
 			.api_version::<dyn BlockBuilderApi<Block>>(parent_hash)?
@@ -209,18 +207,10 @@ where
 		self.api.execute_in_transaction(|api| {
 			let res = if version < 6 {
 				#[allow(deprecated)]
-				api.apply_extrinsic_before_version_6_with_context(
-					parent_hash,
-					ExecutionContext::BlockConstruction,
-					xt.clone(),
-				)
-				.map(legacy::byte_sized_error::convert_to_latest)
+				api.apply_extrinsic_before_version_6(parent_hash, xt.clone())
+					.map(legacy::byte_sized_error::convert_to_latest)
 			} else {
-				api.apply_extrinsic_with_context(
-					parent_hash,
-					ExecutionContext::BlockConstruction,
-					xt.clone(),
-				)
+				api.apply_extrinsic(parent_hash, xt.clone())
 			};
 
 			match res {
@@ -242,9 +232,7 @@ where
 	/// supplied by `self.api`, combined as [`BuiltBlock`].
 	/// The storage proof will be `Some(_)` when proof recording was enabled.
 	pub fn build(mut self) -> Result<BuiltBlock<Block, backend::StateBackendFor<B, Block>>, Error> {
-		let header = self
-			.api
-			.finalize_block_with_context(self.parent_hash, ExecutionContext::BlockConstruction)?;
+		let header = self.api.finalize_block(self.parent_hash)?;
 
 		debug_assert_eq!(
 			header.extrinsics_root().clone(),
@@ -282,11 +270,7 @@ where
 			.execute_in_transaction(move |api| {
 				// `create_inherents` should not change any state, to ensure this we always rollback
 				// the transaction.
-				TransactionOutcome::Rollback(api.inherent_extrinsics_with_context(
-					parent_hash,
-					ExecutionContext::BlockConstruction,
-					inherent_data,
-				))
+				TransactionOutcome::Rollback(api.inherent_extrinsics(parent_hash, inherent_data))
 			})
 			.map_err(|e| Error::Application(Box::new(e)))
 	}
diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs
index 982979605a3..40d86fd9798 100644
--- a/substrate/client/cli/src/arg_enums.rs
+++ b/substrate/client/cli/src/arg_enums.rs
@@ -161,17 +161,6 @@ pub enum ExecutionStrategy {
 	NativeElseWasm,
 }
 
-impl Into<sc_client_api::ExecutionStrategy> for ExecutionStrategy {
-	fn into(self) -> sc_client_api::ExecutionStrategy {
-		match self {
-			ExecutionStrategy::Native => sc_client_api::ExecutionStrategy::NativeWhenPossible,
-			ExecutionStrategy::Wasm => sc_client_api::ExecutionStrategy::AlwaysWasm,
-			ExecutionStrategy::Both => sc_client_api::ExecutionStrategy::Both,
-			ExecutionStrategy::NativeElseWasm => sc_client_api::ExecutionStrategy::NativeElseWasm,
-		}
-	}
-}
-
 /// Available RPC methods.
 #[allow(missing_docs)]
 #[derive(Debug, Copy, Clone, PartialEq, ValueEnum)]
@@ -270,16 +259,3 @@ impl Into<sc_network::config::SyncMode> for SyncMode {
 		}
 	}
 }
-
-/// Default value for the `--execution-syncing` parameter.
-pub const DEFAULT_EXECUTION_SYNCING: ExecutionStrategy = ExecutionStrategy::Wasm;
-/// Default value for the `--execution-import-block` parameter.
-pub const DEFAULT_EXECUTION_IMPORT_BLOCK: ExecutionStrategy = ExecutionStrategy::Wasm;
-/// Default value for the `--execution-import-block` parameter when the node is a validator.
-pub const DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR: ExecutionStrategy = ExecutionStrategy::Wasm;
-/// Default value for the `--execution-block-construction` parameter.
-pub const DEFAULT_EXECUTION_BLOCK_CONSTRUCTION: ExecutionStrategy = ExecutionStrategy::Wasm;
-/// Default value for the `--execution-offchain-worker` parameter.
-pub const DEFAULT_EXECUTION_OFFCHAIN_WORKER: ExecutionStrategy = ExecutionStrategy::Wasm;
-/// Default value for the `--execution-other` parameter.
-pub const DEFAULT_EXECUTION_OTHER: ExecutionStrategy = ExecutionStrategy::Wasm;
diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs
index 04c62a73b40..4d218da6aa8 100644
--- a/substrate/client/cli/src/config.rs
+++ b/substrate/client/cli/src/config.rs
@@ -24,7 +24,6 @@ use crate::{
 };
 use log::warn;
 use names::{Generator, Name};
-use sc_client_api::execution_extensions::ExecutionStrategies;
 use sc_service::{
 	config::{
 		BasePath, Configuration, DatabaseSource, KeystoreConfig, NetworkConfiguration,
@@ -291,21 +290,6 @@ pub trait CliConfiguration<DCV: DefaultConfigurationValues = ()>: Sized {
 		self.import_params().map(|x| x.wasm_runtime_overrides()).unwrap_or_default()
 	}
 
-	/// Get the execution strategies.
-	///
-	/// By default this is retrieved from `ImportParams` if it is available. Otherwise its
-	/// `ExecutionStrategies::default()`.
-	fn execution_strategies(
-		&self,
-		is_dev: bool,
-		is_validator: bool,
-	) -> Result<ExecutionStrategies> {
-		Ok(self
-			.import_params()
-			.map(|x| x.execution_strategies(is_dev, is_validator))
-			.unwrap_or_default())
-	}
-
 	/// Get the RPC address.
 	fn rpc_addr(&self, _default_listen_port: u16) -> Result<Option<SocketAddr>> {
 		Ok(None)
@@ -508,7 +492,6 @@ pub trait CliConfiguration<DCV: DefaultConfigurationValues = ()>: Sized {
 			blocks_pruning: self.blocks_pruning()?,
 			wasm_method: self.wasm_method()?,
 			wasm_runtime_overrides: self.wasm_runtime_overrides(),
-			execution_strategies: self.execution_strategies(is_dev, is_validator)?,
 			rpc_addr: self.rpc_addr(DCV::rpc_listen_port())?,
 			rpc_methods: self.rpc_methods()?,
 			rpc_max_connections: self.rpc_max_connections()?,
diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs
index 9e57a017e51..bfa54a35058 100644
--- a/substrate/client/cli/src/params/import_params.rs
+++ b/substrate/client/cli/src/params/import_params.rs
@@ -19,15 +19,11 @@
 use crate::{
 	arg_enums::{
 		ExecutionStrategy, WasmExecutionMethod, WasmtimeInstantiationStrategy,
-		DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, DEFAULT_EXECUTION_IMPORT_BLOCK,
-		DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR, DEFAULT_EXECUTION_OFFCHAIN_WORKER,
-		DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING,
 		DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD,
 	},
 	params::{DatabaseParams, PruningParams},
 };
 use clap::Args;
-use sc_client_api::execution_extensions::ExecutionStrategies;
 use std::path::PathBuf;
 
 /// Parameters for block import.
@@ -104,6 +100,8 @@ impl ImportParams {
 
 	/// Get the WASM execution method from the parameters
 	pub fn wasm_method(&self) -> sc_service::config::WasmExecutionMethod {
+		self.execution_strategies.check_usage_and_print_deprecation_warning();
+
 		crate::execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy)
 	}
 
@@ -112,36 +110,6 @@ impl ImportParams {
 	pub fn wasm_runtime_overrides(&self) -> Option<PathBuf> {
 		self.wasm_runtime_overrides.clone()
 	}
-
-	/// Get execution strategies for the parameters
-	pub fn execution_strategies(&self, is_dev: bool, is_validator: bool) -> ExecutionStrategies {
-		let exec = &self.execution_strategies;
-		let exec_all_or = |strat: Option<ExecutionStrategy>, default: ExecutionStrategy| {
-			let default = if is_dev { ExecutionStrategy::Native } else { default };
-
-			exec.execution.unwrap_or_else(|| strat.unwrap_or(default)).into()
-		};
-
-		let default_execution_import_block = if is_validator {
-			DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR
-		} else {
-			DEFAULT_EXECUTION_IMPORT_BLOCK
-		};
-
-		ExecutionStrategies {
-			syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING),
-			importing: exec_all_or(exec.execution_import_block, default_execution_import_block),
-			block_construction: exec_all_or(
-				exec.execution_block_construction,
-				DEFAULT_EXECUTION_BLOCK_CONSTRUCTION,
-			),
-			offchain_worker: exec_all_or(
-				exec.execution_offchain_worker,
-				DEFAULT_EXECUTION_OFFCHAIN_WORKER,
-			),
-			other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER),
-		}
-	}
 }
 
 /// Execution strategies parameters.
@@ -186,3 +154,23 @@ pub struct ExecutionStrategiesParams {
 	)]
 	pub execution: Option<ExecutionStrategy>,
 }
+
+impl ExecutionStrategiesParams {
+	/// Check whether any of the deprecated parameters is still passed and print a warning if so.
+	fn check_usage_and_print_deprecation_warning(&self) {
+		for (param, name) in [
+			(&self.execution_syncing, "execution-syncing"),
+			(&self.execution_import_block, "execution-import-block"),
+			(&self.execution_block_construction, "execution-block-construction"),
+			(&self.execution_offchain_worker, "execution-offchain-worker"),
+			(&self.execution_other, "execution-other"),
+			(&self.execution, "execution"),
+		] {
+			if param.is_some() {
+				eprintln!(
+					"CLI parameter `--{name}` has no effect anymore and will be removed in the future!"
+				);
+			}
+		}
+	}
+}
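// Behaviour sketch (the binary name is a placeholder): the retired execution-strategy flags
// are still accepted by the parser, but the only effect is a notice on stderr when
// `wasm_method()` is read from the import params, e.g.:
//
//   $ substrate --execution=wasm --execution-offchain-worker=native ...
//   CLI parameter `--execution-offchain-worker` has no effect anymore and will be removed in the future!
//   CLI parameter `--execution` has no effect anymore and will be removed in the future!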
diff --git a/substrate/client/cli/src/runner.rs b/substrate/client/cli/src/runner.rs
index 66c3fe41b6c..c96f494354f 100644
--- a/substrate/client/cli/src/runner.rs
+++ b/substrate/client/cli/src/runner.rs
@@ -265,7 +265,6 @@ mod tests {
 				)),
 				wasm_method: Default::default(),
 				wasm_runtime_overrides: None,
-				execution_strategies: Default::default(),
 				rpc_addr: None,
 				rpc_max_connections: Default::default(),
 				rpc_cors: None,
diff --git a/substrate/client/consensus/aura/src/import_queue.rs b/substrate/client/consensus/aura/src/import_queue.rs
index ef7a2a1cc86..0b9ceb4fe77 100644
--- a/substrate/client/consensus/aura/src/import_queue.rs
+++ b/substrate/client/consensus/aura/src/import_queue.rs
@@ -38,7 +38,7 @@ use sp_blockchain::HeaderBackend;
 use sp_consensus::Error as ConsensusError;
 use sp_consensus_aura::{inherents::AuraInherentData, AuraApi};
 use sp_consensus_slots::Slot;
-use sp_core::{crypto::Pair, ExecutionContext};
+use sp_core::crypto::Pair;
 use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _};
 use sp_runtime::{
 	traits::{Block as BlockT, Header, NumberFor},
@@ -138,7 +138,6 @@ where
 		at_hash: B::Hash,
 		inherent_data: sp_inherents::InherentData,
 		create_inherent_data_providers: CIDP::InherentDataProviders,
-		execution_context: ExecutionContext,
 	) -> Result<(), Error<B>>
 	where
 		C: ProvideRuntimeApi<B>,
@@ -148,7 +147,7 @@ where
 		let inherent_res = self
 			.client
 			.runtime_api()
-			.check_inherents_with_context(at_hash, execution_context, block, inherent_data)
+			.check_inherents(at_hash, block, inherent_data)
 			.map_err(|e| Error::Client(e.into()))?;
 
 		if !inherent_res.ok() {
@@ -249,7 +248,6 @@ where
 							parent_hash,
 							inherent_data,
 							create_inherent_data_providers,
-							block.origin.into(),
 						)
 						.await
 						.map_err(|e| e.to_string())?;
diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml
index 28bd28c812b..e036ff1e64c 100644
--- a/substrate/client/consensus/babe/Cargo.toml
+++ b/substrate/client/consensus/babe/Cargo.toml
@@ -31,6 +31,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo
 sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" }
 sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" }
 sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" }
+sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../transaction-pool/api" }
 sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" }
 sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" }
diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml
index c0b20eb11c9..7b16ea84c43 100644
--- a/substrate/client/consensus/babe/rpc/Cargo.toml
+++ b/substrate/client/consensus/babe/rpc/Cargo.toml
@@ -34,5 +34,6 @@ serde_json = "1.0.85"
 tokio = "1.22.0"
 sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" }
 sc-keystore = { version = "4.0.0-dev", path = "../../../keystore" }
+sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../transaction-pool/api" }
 sp-keyring = { version = "24.0.0", path = "../../../../primitives/keyring" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" }
diff --git a/substrate/client/consensus/babe/rpc/src/lib.rs b/substrate/client/consensus/babe/rpc/src/lib.rs
index 1ae15cc5453..bffe026ea6e 100644
--- a/substrate/client/consensus/babe/rpc/src/lib.rs
+++ b/substrate/client/consensus/babe/rpc/src/lib.rs
@@ -186,6 +186,8 @@ impl From<Error> for JsonRpseeError {
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use sc_consensus_babe::ImportQueueParams;
+	use sc_transaction_pool_api::{OffchainTransactionPoolFactory, RejectAllTxPool};
 	use sp_consensus_babe::inherents::InherentDataProvider;
 	use sp_core::{crypto::key_types::BABE, testing::TaskExecutor};
 	use sp_keyring::Sr25519Keyring;
@@ -219,22 +221,25 @@ mod tests {
 			sc_consensus_babe::block_import(config.clone(), client.clone(), client.clone())
 				.expect("can initialize block-import");
 
-		let (_, babe_worker_handle) = sc_consensus_babe::import_queue(
-			link.clone(),
-			block_import.clone(),
-			None,
-			client.clone(),
-			longest_chain.clone(),
-			move |_, _| async move {
+		let (_, babe_worker_handle) = sc_consensus_babe::import_queue(ImportQueueParams {
+			link: link.clone(),
+			block_import: block_import.clone(),
+			justification_import: None,
+			client: client.clone(),
+			select_chain: longest_chain.clone(),
+			create_inherent_data_providers: move |_, _| async move {
 				Ok((InherentDataProvider::from_timestamp_and_slot_duration(
 					0.into(),
 					slot_duration,
 				),))
 			},
-			&task_executor,
-			None,
-			None,
-		)
+			spawner: &task_executor,
+			registry: None,
+			telemetry: None,
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+				RejectAllTxPool::default(),
+			),
+		})
 		.unwrap();
 
 		Babe::new(client.clone(), babe_worker_handle, keystore, longest_chain, deny_unsafe)
diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs
index 64ff00bb58c..76bd670c200 100644
--- a/substrate/client/consensus/babe/src/lib.rs
+++ b/substrate/client/consensus/babe/src/lib.rs
@@ -106,6 +106,7 @@ use sc_consensus_slots::{
 	SlotInfo, StorageChanges,
 };
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_application_crypto::AppCrypto;
 use sp_block_builder::BlockBuilder as BlockBuilderApi;
@@ -116,7 +117,7 @@ use sp_blockchain::{
 use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain};
 use sp_consensus_babe::inherents::BabeInherentData;
 use sp_consensus_slots::Slot;
-use sp_core::ExecutionContext;
+use sp_core::traits::SpawnEssentialNamed;
 use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider};
 use sp_keystore::KeystorePtr;
 use sp_runtime::{
@@ -992,6 +993,7 @@ pub struct BabeVerifier<Block: BlockT, Client, SelectChain, CIDP> {
 	config: BabeConfiguration,
 	epoch_changes: SharedEpochChanges<Block, Epoch>,
 	telemetry: Option<TelemetryHandle>,
+	offchain_tx_pool_factory: OffchainTransactionPoolFactory<Block>,
 }
 
 impl<Block, Client, SelectChain, CIDP> BabeVerifier<Block, Client, SelectChain, CIDP>
@@ -1008,12 +1010,11 @@ where
 		at_hash: Block::Hash,
 		inherent_data: InherentData,
 		create_inherent_data_providers: CIDP::InherentDataProviders,
-		execution_context: ExecutionContext,
 	) -> Result<(), Error<Block>> {
 		let inherent_res = self
 			.client
 			.runtime_api()
-			.check_inherents_with_context(at_hash, execution_context, block, inherent_data)
+			.check_inherents(at_hash, block, inherent_data)
 			.map_err(Error::RuntimeApi)?;
 
 		if !inherent_res.ok() {
@@ -1098,8 +1099,13 @@ where
 		};
 
 		// submit equivocation report at best block.
-		self.client
-			.runtime_api()
+		let mut runtime_api = self.client.runtime_api();
+
+		// Register the offchain tx pool to be able to use it from the runtime.
+		runtime_api
+			.register_extension(self.offchain_tx_pool_factory.offchain_transaction_pool(best_hash));
+
+		runtime_api
 			.submit_report_equivocation_unsigned_extrinsic(
 				best_hash,
 				equivocation_proof,
@@ -1250,7 +1256,6 @@ where
 							parent_hash,
 							inherent_data,
 							create_inherent_data_providers,
-							block.origin.into(),
 						)
 						.await?;
 					}
@@ -1768,6 +1773,38 @@ where
 	Ok((import, link))
 }
 
+/// Parameters passed to [`import_queue`].
+pub struct ImportQueueParams<'a, Block: BlockT, BI, Client, CIDP, SelectChain, Spawn> {
+	/// The BABE link that is created by [`block_import`].
+	pub link: BabeLink<Block>,
+	/// The block import that should be wrapped.
+	pub block_import: BI,
+	/// Optional justification import.
+	pub justification_import: Option<BoxJustificationImport<Block>>,
+	/// The client to interact with the internals of the node.
+	pub client: Arc<Client>,
+	/// A [`SelectChain`] implementation.
+	///
+	/// Used to determine the best block that should be used as the basis when sending an
+	/// equivocation report.
+	pub select_chain: SelectChain,
+	/// Used to create the inherent data providers.
+	///
+	/// These inherent data providers are then used to create the inherent data that is
+	/// passed to the `check_inherents` runtime call.
+	pub create_inherent_data_providers: CIDP,
+	/// Spawner for spawning futures.
+	pub spawner: &'a Spawn,
+	/// Registry for prometheus metrics.
+	pub registry: Option<&'a Registry>,
+	/// Optional telemetry handle to report telemetry events.
+	pub telemetry: Option<TelemetryHandle>,
+	/// The offchain transaction pool factory.
+	///
+	/// Will be used when sending equivocation reports.
+	pub offchain_tx_pool_factory: OffchainTransactionPoolFactory<Block>,
+}
+
 /// Start an import queue for the BABE consensus algorithm.
 ///
 /// This method returns the import queue, some data that needs to be passed to the block authoring
@@ -1777,19 +1814,22 @@ where
 ///
 /// The block import object provided must be the `BabeBlockImport` or a wrapper
 /// of it, otherwise crucial import logic will be omitted.
-pub fn import_queue<Block: BlockT, Client, SelectChain, Inner, CIDP>(
-	babe_link: BabeLink<Block>,
-	block_import: Inner,
-	justification_import: Option<BoxJustificationImport<Block>>,
-	client: Arc<Client>,
-	select_chain: SelectChain,
-	create_inherent_data_providers: CIDP,
-	spawner: &impl sp_core::traits::SpawnEssentialNamed,
-	registry: Option<&Registry>,
-	telemetry: Option<TelemetryHandle>,
+pub fn import_queue<Block: BlockT, Client, SelectChain, BI, CIDP, Spawn>(
+	ImportQueueParams {
+		link: babe_link,
+		block_import,
+		justification_import,
+		client,
+		select_chain,
+		create_inherent_data_providers,
+		spawner,
+		registry,
+		telemetry,
+		offchain_tx_pool_factory,
+	}: ImportQueueParams<'_, Block, BI, Client, CIDP, SelectChain, Spawn>,
 ) -> ClientResult<(DefaultImportQueue<Block, Client>, BabeWorkerHandle<Block>)>
 where
-	Inner: BlockImport<
+	BI: BlockImport<
 			Block,
 			Error = ConsensusError,
 			Transaction = sp_api::TransactionFor<Client, Block>,
@@ -1807,6 +1847,7 @@ where
 	SelectChain: sp_consensus::SelectChain<Block> + 'static,
 	CIDP: CreateInherentDataProviders<Block, ()> + Send + Sync + 'static,
 	CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync,
+	Spawn: SpawnEssentialNamed,
 {
 	const HANDLE_BUFFER_SIZE: usize = 1024;
 
@@ -1817,6 +1858,7 @@ where
 		epoch_changes: babe_link.epoch_changes.clone(),
 		telemetry,
 		client: client.clone(),
+		offchain_tx_pool_factory,
 	};
 
 	let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE);
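// Sketch of how a service builder might now wire the BABE import queue. All bindings such
// as `babe_link`, `babe_block_import`, `client`, `select_chain`, `slot_duration`,
// `task_manager`, `prometheus_registry` and `transaction_pool` are assumed to already exist;
// the point is the struct-style parameters and the new `offchain_tx_pool_factory` field.
let (import_queue, babe_worker_handle) =
	sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams {
		link: babe_link.clone(),
		block_import: babe_block_import.clone(),
		justification_import: None,
		client: client.clone(),
		select_chain: select_chain.clone(),
		create_inherent_data_providers: move |_, ()| async move {
			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
			let slot =
				sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
					*timestamp,
					slot_duration,
				);
			Ok((slot, timestamp))
		},
		spawner: &task_manager.spawn_essential_handle(),
		registry: prometheus_registry.as_ref(),
		telemetry: None,
		// New: used by the verifier to submit equivocation reports through the runtime.
		offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
	})?;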
diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs
index 59b4076e2fd..384e45228b5 100644
--- a/substrate/client/consensus/babe/src/tests.rs
+++ b/substrate/client/consensus/babe/src/tests.rs
@@ -26,6 +26,7 @@ use sc_consensus::{BoxBlockImport, BoxJustificationImport};
 use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition};
 use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging;
 use sc_network_test::{Block as TestBlock, *};
+use sc_transaction_pool_api::RejectAllTxPool;
 use sp_application_crypto::key_types::BABE;
 use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal};
 use sp_consensus_babe::{
@@ -283,6 +284,9 @@ impl TestNetFactory for BabeTestNet {
 				config: data.link.config.clone(),
 				epoch_changes: data.link.epoch_changes.clone(),
 				telemetry: None,
+				offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+					RejectAllTxPool::default(),
+				),
 			},
 			mutator: MUTATOR.with(|m| m.borrow().clone()),
 		}
diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml
index 13b472dc703..af39c640122 100644
--- a/substrate/client/consensus/grandpa/Cargo.toml
+++ b/substrate/client/consensus/grandpa/Cargo.toml
@@ -32,6 +32,7 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.
 sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" }
 sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" }
 sc-client-api = { version = "4.0.0-dev", path = "../../api" }
+sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../transaction-pool/api" }
 sc-consensus = { version = "0.10.0-dev", path = "../common" }
 sc-network = { version = "0.10.0-dev", path = "../../network" }
 sc-network-gossip = { version = "0.10.0-dev", path = "../../network-gossip" }
diff --git a/substrate/client/consensus/grandpa/src/environment.rs b/substrate/client/consensus/grandpa/src/environment.rs
index 67820a59cc9..eeb9ff44682 100644
--- a/substrate/client/consensus/grandpa/src/environment.rs
+++ b/substrate/client/consensus/grandpa/src/environment.rs
@@ -40,6 +40,8 @@ use sc_client_api::{
 	utils::is_descendent_of,
 };
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
+use sp_api::ApiExt;
 use sp_blockchain::HeaderMetadata;
 use sp_consensus::SelectChain as SelectChainT;
 use sp_consensus_grandpa::{
@@ -444,6 +446,7 @@ pub(crate) struct Environment<
 	pub(crate) metrics: Option<Metrics>,
 	pub(crate) justification_sender: Option<GrandpaJustificationSender<Block>>,
 	pub(crate) telemetry: Option<TelemetryHandle>,
+	pub(crate) offchain_tx_pool_factory: OffchainTransactionPoolFactory<Block>,
 	pub(crate) _phantom: PhantomData<Backend>,
 }
 
@@ -570,8 +573,13 @@ where
 		// submit equivocation report at **best** block
 		let equivocation_proof = EquivocationProof::new(authority_set.set_id, equivocation);
 
-		self.client
-			.runtime_api()
+		let mut runtime_api = self.client.runtime_api();
+
+		runtime_api.register_extension(
+			self.offchain_tx_pool_factory.offchain_transaction_pool(best_block_hash),
+		);
+
+		runtime_api
 			.submit_report_equivocation_unsigned_extrinsic(
 				best_block_hash,
 				equivocation_proof,
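// Sketch of the pattern both BABE and GRANDPA now follow (assumed bindings): since the
// runtime no longer receives an implicit offchain context, any runtime call that submits a
// transaction must register the offchain transaction pool as an extension first.
let mut runtime_api = client.runtime_api();
runtime_api.register_extension(
	offchain_tx_pool_factory.offchain_transaction_pool(best_block_hash),
);
runtime_api.submit_report_equivocation_unsigned_extrinsic(
	best_block_hash,
	equivocation_proof,
	key_owner_proof,
)?;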
diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs
index 9c0aa60c24e..c888340f70a 100644
--- a/substrate/client/consensus/grandpa/src/lib.rs
+++ b/substrate/client/consensus/grandpa/src/lib.rs
@@ -64,12 +64,13 @@ use prometheus_endpoint::{PrometheusError, Registry};
 use sc_client_api::{
 	backend::{AuxStore, Backend},
 	utils::is_descendent_of,
-	BlockchainEvents, CallExecutor, ExecutionStrategy, ExecutorProvider, Finalizer, LockImportRun,
-	StorageProvider, TransactionFor,
+	BlockchainEvents, CallExecutor, ExecutorProvider, Finalizer, LockImportRun, StorageProvider,
+	TransactionFor,
 };
 use sc_consensus::BlockImport;
 use sc_network::types::ProtocolName;
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::AppCrypto;
@@ -479,7 +480,6 @@ where
 				self.expect_block_hash_from_id(&BlockId::Number(Zero::zero()))?,
 				"GrandpaApi_grandpa_authorities",
 				&[],
-				ExecutionStrategy::NativeElseWasm,
 				CallContext::Offchain,
 			)
 			.and_then(|call_result| {
@@ -688,6 +688,11 @@ pub struct GrandpaParams<Block: BlockT, C, N, S, SC, VR> {
 	pub shared_voter_state: SharedVoterState,
 	/// TelemetryHandle instance.
 	pub telemetry: Option<TelemetryHandle>,
+	/// Offchain transaction pool factory.
+	///
+	/// This will be used to create an offchain transaction pool instance for sending an
+	/// equivocation report from the runtime.
+	pub offchain_tx_pool_factory: OffchainTransactionPoolFactory<Block>,
 }
 
 /// Returns the configuration value to put in
@@ -736,6 +741,7 @@ where
 		prometheus_registry,
 		shared_voter_state,
 		telemetry,
+		offchain_tx_pool_factory,
 	} = grandpa_params;
 
 	// NOTE: we have recently removed `run_grandpa_observer` from the public
@@ -810,6 +816,7 @@ where
 		shared_voter_state,
 		justification_sender,
 		telemetry,
+		offchain_tx_pool_factory,
 	);
 
 	let voter_work = voter_work.map(|res| match res {
@@ -879,6 +886,7 @@ where
 		shared_voter_state: SharedVoterState,
 		justification_sender: GrandpaJustificationSender<Block>,
 		telemetry: Option<TelemetryHandle>,
+		offchain_tx_pool_factory: OffchainTransactionPoolFactory<Block>,
 	) -> Self {
 		let metrics = match prometheus_registry.as_ref().map(Metrics::register) {
 			Some(Ok(metrics)) => Some(metrics),
@@ -903,6 +911,7 @@ where
 			metrics: metrics.as_ref().map(|m| m.environment.clone()),
 			justification_sender: Some(justification_sender),
 			telemetry: telemetry.clone(),
+			offchain_tx_pool_factory,
 			_phantom: PhantomData,
 		});
 
@@ -1054,6 +1063,7 @@ where
 					metrics: self.env.metrics.clone(),
 					justification_sender: self.env.justification_sender.clone(),
 					telemetry: self.telemetry.clone(),
+					offchain_tx_pool_factory: self.env.offchain_tx_pool_factory.clone(),
 					_phantom: PhantomData,
 				});
 
diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs
index c46e249be48..4fbeed71a1b 100644
--- a/substrate/client/consensus/grandpa/src/tests.rs
+++ b/substrate/client/consensus/grandpa/src/tests.rs
@@ -33,6 +33,7 @@ use sc_network_test::{
 	Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient,
 	PeersFullClient, TestClient, TestNetFactory,
 };
+use sc_transaction_pool_api::RejectAllTxPool;
 use sp_api::{ApiRef, ProvideRuntimeApi};
 use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain};
 use sp_consensus_grandpa::{
@@ -331,6 +332,9 @@ fn initialize_grandpa(
 			voting_rule: (),
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+				RejectAllTxPool::default(),
+			),
 			telemetry: None,
 		};
 		let voter =
@@ -481,6 +485,9 @@ async fn finalize_3_voters_1_full_observer() {
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
 			telemetry: None,
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+				RejectAllTxPool::default(),
+			),
 		};
 
 		run_grandpa_voter(grandpa_params).expect("all in order with client and network")
@@ -573,6 +580,9 @@ async fn transition_3_voters_twice_1_full_observer() {
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
 			telemetry: None,
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+				RejectAllTxPool::default(),
+			),
 		};
 
 		voters
@@ -1040,6 +1050,9 @@ async fn voter_persists_its_votes() {
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
 			telemetry: None,
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+				RejectAllTxPool::default(),
+			),
 		};
 
 		run_grandpa_voter(grandpa_params).expect("all in order with client and network")
@@ -1083,6 +1096,9 @@ async fn voter_persists_its_votes() {
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
 			telemetry: None,
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+				RejectAllTxPool::default(),
+			),
 		};
 
 		run_grandpa_voter(grandpa_params)
@@ -1293,6 +1309,9 @@ async fn voter_catches_up_to_latest_round_when_behind() {
 			prometheus_registry: None,
 			shared_voter_state: SharedVoterState::empty(),
 			telemetry: None,
+			offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(
+				RejectAllTxPool::default(),
+			),
 		};
 
 		Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network"))
@@ -1422,6 +1441,7 @@ where
 		justification_sender: None,
 		telemetry: None,
 		_phantom: PhantomData,
+		offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(RejectAllTxPool::default()),
 	}
 }
 
@@ -1986,7 +2006,7 @@ async fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() {
 	// keys it should work
 	equivocation.identity = TryFrom::try_from(&[1; 32][..]).unwrap();
 	let equivocation_proof = sp_consensus_grandpa::Equivocation::Prevote(equivocation);
-	assert!(environment.report_equivocation(equivocation_proof).is_ok());
+	environment.report_equivocation(equivocation_proof).unwrap();
 }
 
 #[tokio::test]
diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs
index 913686b7bf3..763cf10e6cd 100644
--- a/substrate/client/consensus/pow/src/lib.rs
+++ b/substrate/client/consensus/pow/src/lib.rs
@@ -58,7 +58,6 @@ use sp_block_builder::BlockBuilder as BlockBuilderApi;
 use sp_blockchain::HeaderBackend;
 use sp_consensus::{Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle};
 use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID};
-use sp_core::ExecutionContext;
 use sp_inherents::{CreateInherentDataProviders, InherentDataProvider};
 use sp_runtime::{
 	generic::{BlockId, Digest, DigestItem},
@@ -269,7 +268,6 @@ where
 		block: B,
 		at_hash: B::Hash,
 		inherent_data_providers: CIDP::InherentDataProviders,
-		execution_context: ExecutionContext,
 	) -> Result<(), Error<B>> {
 		if *block.header().number() < self.check_inherents_after {
 			return Ok(())
@@ -283,7 +281,7 @@ where
 		let inherent_res = self
 			.client
 			.runtime_api()
-			.check_inherents_with_context(at_hash, execution_context, block, inherent_data)
+			.check_inherents(at_hash, block, inherent_data)
 			.map_err(|e| Error::Client(e.into()))?;
 
 		if !inherent_res.ok() {
@@ -348,7 +346,6 @@ where
 					self.create_inherent_data_providers
 						.create_inherent_data_providers(parent_hash, ())
 						.await?,
-					block.origin.into(),
 				)
 				.await?;
 			}
diff --git a/substrate/client/merkle-mountain-range/rpc/src/lib.rs b/substrate/client/merkle-mountain-range/rpc/src/lib.rs
index daf2cd1ec29..5be82b600d9 100644
--- a/substrate/client/merkle-mountain-range/rpc/src/lib.rs
+++ b/substrate/client/merkle-mountain-range/rpc/src/lib.rs
@@ -30,9 +30,12 @@ use jsonrpsee::{
 };
 use serde::{Deserialize, Serialize};
 
-use sp_api::{NumberFor, ProvideRuntimeApi};
+use sp_api::{ApiExt, NumberFor, ProvideRuntimeApi};
 use sp_blockchain::HeaderBackend;
-use sp_core::Bytes;
+use sp_core::{
+	offchain::{storage::OffchainDb, OffchainDbExt, OffchainStorage},
+	Bytes,
+};
 use sp_mmr_primitives::{Error as MmrError, Proof};
 use sp_runtime::traits::Block as BlockT;
 
@@ -127,26 +130,28 @@ pub trait MmrApi<BlockHash, BlockNumber, MmrHash> {
 }
 
 /// MMR RPC methods.
-pub struct Mmr<Client, Block> {
+pub struct Mmr<Client, Block, S> {
 	client: Arc<Client>,
+	offchain_db: OffchainDb<S>,
 	_marker: PhantomData<Block>,
 }
 
-impl<C, B> Mmr<C, B> {
+impl<C, B, S> Mmr<C, B, S> {
 	/// Create new `Mmr` with the given reference to the client.
-	pub fn new(client: Arc<C>) -> Self {
-		Self { client, _marker: Default::default() }
+	pub fn new(client: Arc<C>, offchain_storage: S) -> Self {
+		Self { client, _marker: Default::default(), offchain_db: OffchainDb::new(offchain_storage) }
 	}
 }
 
 #[async_trait]
-impl<Client, Block, MmrHash> MmrApiServer<<Block as BlockT>::Hash, NumberFor<Block>, MmrHash>
-	for Mmr<Client, (Block, MmrHash)>
+impl<Client, Block, MmrHash, S> MmrApiServer<<Block as BlockT>::Hash, NumberFor<Block>, MmrHash>
+	for Mmr<Client, (Block, MmrHash), S>
 where
 	Block: BlockT,
 	Client: Send + Sync + 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block>,
 	Client::Api: MmrRuntimeApi<Block, MmrHash, NumberFor<Block>>,
 	MmrHash: Codec + Send + Sync + 'static,
+	S: OffchainStorage + 'static,
 {
 	fn mmr_root(&self, at: Option<<Block as BlockT>::Hash>) -> RpcResult<MmrHash> {
 		let block_hash = at.unwrap_or_else(||
@@ -166,18 +171,15 @@ where
 		best_known_block_number: Option<NumberFor<Block>>,
 		at: Option<<Block as BlockT>::Hash>,
 	) -> RpcResult<LeavesProof<<Block as BlockT>::Hash>> {
-		let api = self.client.runtime_api();
+		let mut api = self.client.runtime_api();
 		let block_hash = at.unwrap_or_else(||
 			// If the block hash is not supplied assume the best block.
 			self.client.info().best_hash);
 
+		api.register_extension(OffchainDbExt::new(self.offchain_db.clone()));
+
 		let (leaves, proof) = api
-			.generate_proof_with_context(
-				block_hash,
-				sp_core::ExecutionContext::OffchainCall(None),
-				block_numbers,
-				best_known_block_number,
-			)
+			.generate_proof(block_hash, block_numbers, best_known_block_number)
 			.map_err(runtime_error_into_rpc_error)?
 			.map_err(mmr_error_into_rpc_error)?;
 
@@ -185,7 +187,7 @@ where
 	}
 
 	fn verify_proof(&self, proof: LeavesProof<<Block as BlockT>::Hash>) -> RpcResult<bool> {
-		let api = self.client.runtime_api();
+		let mut api = self.client.runtime_api();
 
 		let leaves = Decode::decode(&mut &proof.leaves.0[..])
 			.map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?;
@@ -193,14 +195,11 @@ where
 		let decoded_proof = Decode::decode(&mut &proof.proof.0[..])
 			.map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?;
 
-		api.verify_proof_with_context(
-			proof.block_hash,
-			sp_core::ExecutionContext::OffchainCall(None),
-			leaves,
-			decoded_proof,
-		)
-		.map_err(runtime_error_into_rpc_error)?
-		.map_err(mmr_error_into_rpc_error)?;
+		api.register_extension(OffchainDbExt::new(self.offchain_db.clone()));
+
+		api.verify_proof(proof.block_hash, leaves, decoded_proof)
+			.map_err(runtime_error_into_rpc_error)?
+			.map_err(mmr_error_into_rpc_error)?;
 
 		Ok(true)
 	}
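// Sketch of the RPC-side wiring this change implies (assumed bindings `backend`, `client`
// and `module`): the MMR RPC now takes an offchain storage handle so it can register
// `OffchainDbExt` before calling into the runtime.
let offchain_storage = backend
	.offchain_storage()
	.expect("Offchain storage is configured for this backend; qed");
module.merge(Mmr::new(client.clone(), offchain_storage).into_rpc())?;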
diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml
index 42504ec590c..f52b0aa2878 100644
--- a/substrate/client/offchain/Cargo.toml
+++ b/substrate/client/offchain/Cargo.toml
@@ -30,11 +30,16 @@ threadpool = "1.7"
 tracing = "0.1.29"
 sc-client-api = { version = "4.0.0-dev", path = "../api" }
 sc-network = { version = "0.10.0-dev", path = "../network" }
+sc-network-common = { version = "0.10.0-dev", path = "../network/common" }
+sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" }
 sc-utils = { version = "4.0.0-dev", path = "../utils" }
 sp-api = { version = "4.0.0-dev", path = "../../primitives/api" }
 sp-core = { version = "21.0.0", path = "../../primitives/core" }
 sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" }
 sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" }
+sp-keystore = { version = "0.27.0", path = "../../primitives/keystore" }
+sp-externalities = { version = "0.19.0", path = "../../primitives/externalities" }
+log = "0.4.17"
 
 [dev-dependencies]
 lazy_static = "1.4.0"
diff --git a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs
index 33ddd46c7a3..e6b0e30f203 100644
--- a/substrate/client/offchain/src/api.rs
+++ b/substrate/client/offchain/src/api.rs
@@ -25,8 +25,8 @@ pub use http::SharedClient;
 use libp2p::{Multiaddr, PeerId};
 use sp_core::{
 	offchain::{
-		self, HttpError, HttpRequestId, HttpRequestStatus, OffchainStorage, OpaqueMultiaddr,
-		OpaqueNetworkState, StorageKind, Timestamp,
+		self, HttpError, HttpRequestId, HttpRequestStatus, OpaqueMultiaddr, OpaqueNetworkState,
+		Timestamp,
 	},
 	OpaquePeerId,
 };
@@ -36,110 +36,6 @@ mod http;
 
 mod timestamp;
 
-fn unavailable_yet<R: Default>(name: &str) -> R {
-	tracing::error!(
-		target: super::LOG_TARGET,
-		"The {:?} API is not available for offchain workers yet. Follow \
-		https://github.com/paritytech/substrate/issues/1458 for details",
-		name
-	);
-	Default::default()
-}
-
-const LOCAL_DB: &str = "LOCAL (fork-aware) DB";
-
-/// Offchain DB reference.
-#[derive(Debug, Clone)]
-pub struct Db<Storage> {
-	/// Persistent storage database.
-	persistent: Storage,
-}
-
-impl<Storage: OffchainStorage> Db<Storage> {
-	/// Create new instance of Offchain DB.
-	pub fn new(persistent: Storage) -> Self {
-		Self { persistent }
-	}
-
-	/// Create new instance of Offchain DB, backed by given backend.
-	pub fn factory_from_backend<Backend, Block>(
-		backend: &Backend,
-	) -> Option<Box<dyn sc_client_api::execution_extensions::DbExternalitiesFactory>>
-	where
-		Backend: sc_client_api::Backend<Block, OffchainStorage = Storage>,
-		Block: sp_runtime::traits::Block,
-		Storage: 'static,
-	{
-		sc_client_api::Backend::offchain_storage(backend).map(|db| Box::new(Self::new(db)) as _)
-	}
-}
-
-impl<Storage: OffchainStorage> offchain::DbExternalities for Db<Storage> {
-	fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) {
-		tracing::debug!(
-			target: "offchain-worker::storage",
-			?kind,
-			key = ?array_bytes::bytes2hex("", key),
-			value = ?array_bytes::bytes2hex("", value),
-			"Write",
-		);
-		match kind {
-			StorageKind::PERSISTENT => self.persistent.set(STORAGE_PREFIX, key, value),
-			StorageKind::LOCAL => unavailable_yet(LOCAL_DB),
-		}
-	}
-
-	fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) {
-		tracing::debug!(
-			target: "offchain-worker::storage",
-			?kind,
-			key = ?array_bytes::bytes2hex("", key),
-			"Clear",
-		);
-		match kind {
-			StorageKind::PERSISTENT => self.persistent.remove(STORAGE_PREFIX, key),
-			StorageKind::LOCAL => unavailable_yet(LOCAL_DB),
-		}
-	}
-
-	fn local_storage_compare_and_set(
-		&mut self,
-		kind: StorageKind,
-		key: &[u8],
-		old_value: Option<&[u8]>,
-		new_value: &[u8],
-	) -> bool {
-		tracing::debug!(
-			target: "offchain-worker::storage",
-			?kind,
-			key = ?array_bytes::bytes2hex("", key),
-			new_value = ?array_bytes::bytes2hex("", new_value),
-			old_value = ?old_value.as_ref().map(|s| array_bytes::bytes2hex("", s)),
-			"CAS",
-		);
-		match kind {
-			StorageKind::PERSISTENT =>
-				self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value),
-			StorageKind::LOCAL => unavailable_yet(LOCAL_DB),
-		}
-	}
-
-	fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option<Vec<u8>> {
-		let result = match kind {
-			StorageKind::PERSISTENT => self.persistent.get(STORAGE_PREFIX, key),
-			StorageKind::LOCAL => unavailable_yet(LOCAL_DB),
-		};
-		tracing::debug!(
-			target: "offchain-worker::storage",
-			?kind,
-			key = ?array_bytes::bytes2hex("", key),
-			result = ?result.as_ref().map(|s| array_bytes::bytes2hex("", s)),
-			"Read",
-		);
-		result
-	}
-}
-
 /// Asynchronous offchain API.
 ///
 /// NOTE this is done to prevent recursive calls into the runtime
@@ -329,7 +225,7 @@ mod tests {
 		config::MultiaddrWithPeerId, types::ProtocolName, NetworkPeers, NetworkStateInfo,
 		ReputationChange,
 	};
-	use sp_core::offchain::{DbExternalities, Externalities};
+	use sp_core::offchain::{storage::OffchainDb, DbExternalities, Externalities, StorageKind};
 	use std::time::SystemTime;
 
 	pub(super) struct TestNetwork();
@@ -418,8 +314,8 @@ mod tests {
 		AsyncApi::new(mock, false, shared_client)
 	}
 
-	fn offchain_db() -> Db<LocalStorage> {
-		Db::new(LocalStorage::new_test())
+	fn offchain_db() -> OffchainDb<LocalStorage> {
+		OffchainDb::new(LocalStorage::new_test())
 	}
 
 	#[test]
diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs
index 0945e39a3a8..4c11a5cb729 100644
--- a/substrate/client/offchain/src/lib.rs
+++ b/substrate/client/offchain/src/lib.rs
@@ -35,22 +35,26 @@
 
 #![warn(missing_docs)]
 
-use std::{fmt, marker::PhantomData, sync::Arc};
+use std::{fmt, sync::Arc};
 
 use futures::{
 	future::{ready, Future},
 	prelude::*,
 };
 use parking_lot::Mutex;
+use sc_client_api::BlockchainEvents;
 use sc_network::{NetworkPeers, NetworkStateInfo};
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use sp_api::{ApiExt, ProvideRuntimeApi};
-use sp_core::{offchain, traits::SpawnNamed, ExecutionContext};
+use sp_core::{offchain, traits::SpawnNamed};
+use sp_externalities::Extension;
+use sp_keystore::{KeystoreExt, KeystorePtr};
 use sp_runtime::traits::{self, Header};
 use threadpool::ThreadPool;
 
 mod api;
 
-pub use api::Db as OffchainDb;
+pub use sp_core::offchain::storage::OffchainDb;
 pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX};
 
 const LOG_TARGET: &str = "offchain-worker";
@@ -61,65 +65,160 @@ pub trait NetworkProvider: NetworkStateInfo + NetworkPeers {}
 
 impl<T> NetworkProvider for T where T: NetworkStateInfo + NetworkPeers {}
 
+/// Special type that implements [`OffchainStorage`](offchain::OffchainStorage).
+///
+/// This type cannot be constructed and should only be used when passing `None` as `offchain_db` to
+/// [`OffchainWorkerOptions`] to make the compiler happy.
+#[derive(Clone)]
+pub enum NoOffchainStorage {}
+
+impl offchain::OffchainStorage for NoOffchainStorage {
+	fn set(&mut self, _: &[u8], _: &[u8], _: &[u8]) {
+		unimplemented!("`NoOffchainStorage` can not be constructed!")
+	}
+
+	fn remove(&mut self, _: &[u8], _: &[u8]) {
+		unimplemented!("`NoOffchainStorage` can not be constructed!")
+	}
+
+	fn get(&self, _: &[u8], _: &[u8]) -> Option<Vec<u8>> {
+		unimplemented!("`NoOffchainStorage` can not be constructed!")
+	}
+
+	fn compare_and_set(&mut self, _: &[u8], _: &[u8], _: Option<&[u8]>, _: &[u8]) -> bool {
+		unimplemented!("`NoOffchainStorage` can not be constructed!")
+	}
+}
+
 /// Options for [`OffchainWorkers`]
-pub struct OffchainWorkerOptions {
+pub struct OffchainWorkerOptions<RA, Block: traits::Block, Storage, CE> {
+	/// Provides access to the runtime api.
+	pub runtime_api_provider: Arc<RA>,
+	/// Provides access to the keystore.
+	pub keystore: Option<KeystorePtr>,
+	/// Provides access to the offchain database.
+	///
+	/// Use [`NoOffchainStorage`] as the type when passing `None` to satisfy the `Storage` type parameter.
+	pub offchain_db: Option<Storage>,
+	/// Provides access to the transaction pool.
+	pub transaction_pool: Option<OffchainTransactionPoolFactory<Block>>,
+	/// Provides access to network information.
+	pub network_provider: Arc<dyn NetworkProvider + Send + Sync>,
+	/// Is the node running as validator?
+	pub is_validator: bool,
 	/// Enable http requests from offchain workers?
 	///
 	/// If not enabled, any http request will panic.
 	pub enable_http_requests: bool,
+	/// Callback to create custom [`Extension`]s that should be registered for the
+	/// `offchain_worker` runtime call.
+	///
+	/// These [`Extension`]s are registered alongside the default extensions and are accessible in
+	/// the host functions.
+	///
+	/// # Example:
+	///
+	/// ```nocompile
+	/// custom_extensions: |block_hash| {
+	///     vec![MyCustomExtension::new()]
+	/// }
+	/// ```
+	pub custom_extensions: CE,
 }
 
 /// An offchain workers manager.
-pub struct OffchainWorkers<Client, Block: traits::Block> {
-	client: Arc<Client>,
-	_block: PhantomData<Block>,
+pub struct OffchainWorkers<RA, Block: traits::Block, Storage> {
+	runtime_api_provider: Arc<RA>,
 	thread_pool: Mutex<ThreadPool>,
 	shared_http_client: api::SharedClient,
-	enable_http: bool,
+	enable_http_requests: bool,
+	keystore: Option<KeystorePtr>,
+	offchain_db: Option<OffchainDb<Storage>>,
+	transaction_pool: Option<OffchainTransactionPoolFactory<Block>>,
+	network_provider: Arc<dyn NetworkProvider + Send + Sync>,
+	is_validator: bool,
+	custom_extensions: Box<dyn Fn(Block::Hash) -> Vec<Box<dyn Extension>> + Send>,
 }
 
-impl<Client, Block: traits::Block> OffchainWorkers<Client, Block> {
+impl<RA, Block: traits::Block, Storage> OffchainWorkers<RA, Block, Storage> {
 	/// Creates new [`OffchainWorkers`].
-	pub fn new(client: Arc<Client>) -> Self {
-		Self::new_with_options(client, OffchainWorkerOptions { enable_http_requests: true })
-	}
-
-	/// Creates new [`OffchainWorkers`] using the given `options`.
-	pub fn new_with_options(client: Arc<Client>, options: OffchainWorkerOptions) -> Self {
+	pub fn new<CE: Fn(Block::Hash) -> Vec<Box<dyn Extension>> + Send + 'static>(
+		OffchainWorkerOptions {
+			runtime_api_provider,
+			keystore,
+			offchain_db,
+			transaction_pool,
+			network_provider,
+			is_validator,
+			enable_http_requests,
+			custom_extensions,
+		}: OffchainWorkerOptions<RA, Block, Storage, CE>,
+	) -> Self {
 		Self {
-			client,
-			_block: PhantomData,
+			runtime_api_provider,
 			thread_pool: Mutex::new(ThreadPool::with_name(
 				"offchain-worker".into(),
 				num_cpus::get(),
 			)),
 			shared_http_client: api::SharedClient::new(),
-			enable_http: options.enable_http_requests,
+			enable_http_requests,
+			keystore,
+			offchain_db: offchain_db.map(OffchainDb::new),
+			transaction_pool,
+			is_validator,
+			network_provider,
+			custom_extensions: Box::new(custom_extensions),
 		}
 	}
 }
 
-impl<Client, Block: traits::Block> fmt::Debug for OffchainWorkers<Client, Block> {
+impl<RA, Block: traits::Block, Storage: offchain::OffchainStorage> fmt::Debug
+	for OffchainWorkers<RA, Block, Storage>
+{
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		f.debug_tuple("OffchainWorkers").finish()
 	}
 }
 
-impl<Client, Block> OffchainWorkers<Client, Block>
+impl<RA, Block, Storage> OffchainWorkers<RA, Block, Storage>
 where
 	Block: traits::Block,
-	Client: ProvideRuntimeApi<Block> + Send + Sync + 'static,
-	Client::Api: OffchainWorkerApi<Block>,
+	RA: ProvideRuntimeApi<Block> + Send + Sync + 'static,
+	RA::Api: OffchainWorkerApi<Block>,
+	Storage: offchain::OffchainStorage + 'static,
 {
+	/// Run the offchain workers on every block import.
+	pub async fn run<BE: BlockchainEvents<Block>>(
+		self,
+		import_events: Arc<BE>,
+		spawner: impl SpawnNamed,
+	) {
+		import_events
+			.import_notification_stream()
+			.for_each(move |n| {
+				if n.is_new_best {
+					spawner.spawn(
+						"offchain-on-block",
+						Some("offchain-worker"),
+						self.on_block_imported(&n.header).boxed(),
+					);
+				} else {
+					tracing::debug!(
+						target: LOG_TARGET,
+						"Skipping offchain workers for non-canon block: {:?}",
+						n.header,
+					)
+				}
+
+				ready(())
+			})
+			.await;
+	}
+
 	/// Start the offchain workers after given block.
 	#[must_use]
-	pub fn on_block_imported(
-		&self,
-		header: &Block::Header,
-		network_provider: Arc<dyn NetworkProvider + Send + Sync>,
-		is_validator: bool,
-	) -> impl Future<Output = ()> {
-		let runtime = self.client.runtime_api();
+	fn on_block_imported(&self, header: &Block::Header) -> impl Future<Output = ()> {
+		let runtime = self.runtime_api_provider.runtime_api();
 		let hash = header.hash();
 		let has_api_v1 = runtime.has_api_with::<dyn OffchainWorkerApi<Block>, _>(hash, |v| v == 1);
 		let has_api_v2 = runtime.has_api_with::<dyn OffchainWorkerApi<Block>, _>(hash, |v| v == 2);
@@ -140,36 +239,59 @@ where
 		};
 		tracing::debug!(
 			target: LOG_TARGET,
-			"Checking offchain workers at {:?}: version:{}",
-			hash,
-			version
+			"Checking offchain workers at {hash:?}: version: {version}",
 		);
+
 		let process = (version > 0).then(|| {
-			let (api, runner) =
-				api::AsyncApi::new(network_provider, is_validator, self.shared_http_client.clone());
-			tracing::debug!(target: LOG_TARGET, "Spawning offchain workers at {:?}", hash);
+			let (api, runner) = api::AsyncApi::new(
+				self.network_provider.clone(),
+				self.is_validator,
+				self.shared_http_client.clone(),
+			);
+			tracing::debug!(target: LOG_TARGET, "Spawning offchain workers at {hash:?}");
 			let header = header.clone();
-			let client = self.client.clone();
+			let client = self.runtime_api_provider.clone();
 
 			let mut capabilities = offchain::Capabilities::all();
+			capabilities.set(offchain::Capabilities::HTTP, self.enable_http_requests);
+
+			let keystore = self.keystore.clone();
+			let db = self.offchain_db.clone();
+			let tx_pool = self.transaction_pool.clone();
+			let custom_extensions = (*self.custom_extensions)(hash);
 
-			capabilities.set(offchain::Capabilities::HTTP, self.enable_http);
 			self.spawn_worker(move || {
-				let runtime = client.runtime_api();
+				let mut runtime = client.runtime_api();
 				let api = Box::new(api);
-				tracing::debug!(target: LOG_TARGET, "Running offchain workers at {:?}", hash);
+				tracing::debug!(target: LOG_TARGET, "Running offchain workers at {hash:?}");
+
+				if let Some(keystore) = keystore {
+					runtime.register_extension(KeystoreExt(keystore.clone()));
+				}
+
+				if let Some(pool) = tx_pool {
+					runtime.register_extension(pool.offchain_transaction_pool(hash));
+				}
+
+				if let Some(offchain_db) = db {
+					runtime.register_extension(offchain::OffchainDbExt::new(
+						offchain::LimitedExternalities::new(capabilities, offchain_db.clone()),
+					));
+				}
+
+				runtime.register_extension(offchain::OffchainWorkerExt::new(
+					offchain::LimitedExternalities::new(capabilities, api),
+				));
+
+				custom_extensions.into_iter().for_each(|ext| runtime.register_extension(ext));
 
-				let context = ExecutionContext::OffchainCall(Some((api, capabilities)));
 				let run = if version == 2 {
-					runtime.offchain_worker_with_context(hash, context, &header)
+					runtime.offchain_worker(hash, &header)
 				} else {
 					#[allow(deprecated)]
-					runtime.offchain_worker_before_version_2_with_context(
-						hash,
-						context,
-						*header.number(),
-					)
+					runtime.offchain_worker_before_version_2(hash, *header.number())
 				};
+
 				if let Err(e) = run {
 					tracing::error!(
 						target: LOG_TARGET,
@@ -201,44 +323,6 @@ where
 	}
 }
 
-/// Inform the offchain worker about new imported blocks
-pub async fn notification_future<Client, Block, Spawner>(
-	is_validator: bool,
-	client: Arc<Client>,
-	offchain: Arc<OffchainWorkers<Client, Block>>,
-	spawner: Spawner,
-	network_provider: Arc<dyn NetworkProvider + Send + Sync>,
-) where
-	Block: traits::Block,
-	Client:
-		ProvideRuntimeApi<Block> + sc_client_api::BlockchainEvents<Block> + Send + Sync + 'static,
-	Client::Api: OffchainWorkerApi<Block>,
-	Spawner: SpawnNamed,
-{
-	client
-		.import_notification_stream()
-		.for_each(move |n| {
-			if n.is_new_best {
-				spawner.spawn(
-					"offchain-on-block",
-					Some("offchain-worker"),
-					offchain
-						.on_block_imported(&n.header, network_provider.clone(), is_validator)
-						.boxed(),
-				);
-			} else {
-				tracing::debug!(
-					target: LOG_TARGET,
-					"Skipping offchain workers for non-canon block: {:?}",
-					n.header,
-				)
-			}
-
-			ready(())
-		})
-		.await;
-}
-
 #[cfg(test)]
 mod tests {
 	use super::*;
@@ -348,8 +432,17 @@ mod tests {
 		let header = client.header(client.chain_info().genesis_hash).unwrap().unwrap();
 
 		// when
-		let offchain = OffchainWorkers::new(client);
-		futures::executor::block_on(offchain.on_block_imported(&header, network, false));
+		let offchain = OffchainWorkers::new(OffchainWorkerOptions {
+			runtime_api_provider: client,
+			keystore: None,
+			offchain_db: None::<NoOffchainStorage>,
+			transaction_pool: Some(OffchainTransactionPoolFactory::new(pool.clone())),
+			network_provider: network,
+			is_validator: false,
+			enable_http_requests: false,
+			custom_extensions: |_| Vec::new(),
+		});
+		futures::executor::block_on(offchain.on_block_imported(&header));
 
 		// then
 		assert_eq!(pool.status().ready, 1);
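// Sketch of the replacement for the removed `notification_future` helper (assumed
// service-builder bindings such as `client`, `backend`, `keystore_container`,
// `transaction_pool`, `network`, `config` and `task_manager`; `futures::FutureExt` in scope
// for `boxed`): the worker is built with all of its dependencies and then driven by `run`.
task_manager.spawn_handle().spawn(
	"offchain-workers-runner",
	"offchain-worker",
	sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
		runtime_api_provider: client.clone(),
		keystore: Some(keystore_container.keystore()),
		offchain_db: backend.offchain_storage(),
		transaction_pool: Some(OffchainTransactionPoolFactory::new(transaction_pool.clone())),
		network_provider: network.clone(),
		is_validator: config.role.is_authority(),
		enable_http_requests: true,
		custom_extensions: |_| vec![],
	})
	.run(client.clone(), task_manager.spawn_handle())
	.boxed(),
);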
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index c691b7b05fc..4f5c11212a9 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -46,3 +46,4 @@ sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
 sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../service" }
 sc-utils = { version = "4.0.0-dev", path = "../utils" }
 assert_matches = "1.3.0"
+pretty_assertions = "1.2.1"
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
index 0f2d55f1009..bb3599c0e4a 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
@@ -420,13 +420,7 @@ where
 
 			let res = client
 				.executor()
-				.call(
-					hash,
-					&function,
-					&call_parameters,
-					client.execution_extensions().strategies().other,
-					CallContext::Offchain,
-				)
+				.call(hash, &function, &call_parameters, CallContext::Offchain)
 				.map(|result| {
 					let result = format!("0x{:?}", HexDisplay::from(&result));
 					ChainHeadEvent::Done(ChainHeadResult { result })
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
index bdb14e8de71..adab64a01c8 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs
@@ -539,7 +539,6 @@ mod tests {
 				genesis_block_builder,
 				None,
 				None,
-				None,
 				Box::new(TaskExecutor::new()),
 				client_config,
 			)
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs
index ee563debb45..54c585932a7 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs
@@ -217,6 +217,14 @@ impl<Block: BlockT, Client: CallApiAt<Block>> CallApiAt<Block> for ChainHeadMock
 	fn state_at(&self, at: Block::Hash) -> Result<Self::StateBackend, sp_api::ApiError> {
 		self.client.state_at(at)
 	}
+
+	fn initialize_extensions(
+		&self,
+		at: <Block as BlockT>::Hash,
+		extensions: &mut sp_api::Extensions,
+	) -> Result<(), sp_api::ApiError> {
+		self.client.initialize_extensions(at, extensions)
+	}
 }
 
 impl<Block: BlockT, Client: BlockBackend<Block>> BlockBackend<Block>
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs
index cb6e65f8591..5dd3cc3da6d 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs
@@ -191,6 +191,7 @@ async fn follow_with_runtime() {
 		[\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\
 		[\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\
 		[\"0xed99c5acb25eedf5\",3]],\"transactionVersion\":1,\"stateVersion\":1}";
+
 	let runtime: RuntimeVersion = serde_json::from_str(runtime_str).unwrap();
 
 	let finalized_block_runtime =
@@ -201,7 +202,7 @@ async fn follow_with_runtime() {
 		finalized_block_runtime,
 		with_runtime: false,
 	});
-	assert_eq!(event, expected);
+	pretty_assertions::assert_eq!(event, expected);
 
 	// Import a new block without runtime changes.
 	// The runtime field must be None in this case.
@@ -1365,7 +1366,6 @@ async fn pin_block_references() {
 			genesis_block_builder,
 			None,
 			None,
-			None,
 			Box::new(TaskExecutor::new()),
 			client_config,
 		)
diff --git a/substrate/client/rpc/Cargo.toml b/substrate/client/rpc/Cargo.toml
index f9c36bf57c6..a3574ed84d0 100644
--- a/substrate/client/rpc/Cargo.toml
+++ b/substrate/client/rpc/Cargo.toml
@@ -50,6 +50,7 @@ sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/comm
 tokio = "1.22.0"
 sp-io = { version = "23.0.0", path = "../../primitives/io" }
 substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
+pretty_assertions = "1.2.1"
 
 [features]
 test-helpers = []
diff --git a/substrate/client/rpc/src/author/mod.rs b/substrate/client/rpc/src/author/mod.rs
index 00a126500e2..feee22641ef 100644
--- a/substrate/client/rpc/src/author/mod.rs
+++ b/substrate/client/rpc/src/author/mod.rs
@@ -37,10 +37,10 @@ use sc_transaction_pool_api::{
 	error::IntoPoolError, BlockHash, InPoolTransaction, TransactionFor, TransactionPool,
 	TransactionSource, TxHash,
 };
-use sp_api::ProvideRuntimeApi;
+use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_blockchain::HeaderBackend;
 use sp_core::Bytes;
-use sp_keystore::KeystorePtr;
+use sp_keystore::{KeystoreExt, KeystorePtr};
 use sp_runtime::{generic, traits::Block as BlockT};
 use sp_session::SessionKeys;
 
@@ -122,8 +122,11 @@ where
 		self.deny_unsafe.check_if_safe()?;
 
 		let best_block_hash = self.client.info().best_hash;
-		self.client
-			.runtime_api()
+		let mut runtime_api = self.client.runtime_api();
+
+		runtime_api.register_extension(KeystoreExt::from(self.keystore.clone()));
+
+		runtime_api
 			.generate_session_keys(best_block_hash, None)
 			.map(Into::into)
 			.map_err(|api_err| Error::Client(Box::new(api_err)).into())
diff --git a/substrate/client/rpc/src/author/tests.rs b/substrate/client/rpc/src/author/tests.rs
index 1f688e8e85e..f48b2f95714 100644
--- a/substrate/client/rpc/src/author/tests.rs
+++ b/substrate/client/rpc/src/author/tests.rs
@@ -66,8 +66,7 @@ struct TestSetup {
 impl Default for TestSetup {
 	fn default() -> Self {
 		let keystore = Arc::new(MemoryKeystore::new());
-		let client_builder = substrate_test_runtime_client::TestClientBuilder::new();
-		let client = Arc::new(client_builder.set_keystore(keystore.clone()).build());
+		let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new().build());
 
 		let spawner = sp_core::testing::TaskExecutor::new();
 		let pool =
diff --git a/substrate/client/rpc/src/state/state_full.rs b/substrate/client/rpc/src/state/state_full.rs
index 20ca5f7131e..9604d9165f9 100644
--- a/substrate/client/rpc/src/state/state_full.rs
+++ b/substrate/client/rpc/src/state/state_full.rs
@@ -198,13 +198,7 @@ where
 			.and_then(|block| {
 				self.client
 					.executor()
-					.call(
-						block,
-						&method,
-						&call_data,
-						self.client.execution_extensions().strategies().other,
-						CallContext::Offchain,
-					)
+					.call(block, &method, &call_data, CallContext::Offchain)
 					.map(Into::into)
 			})
 			.map_err(client_err)
diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs
index 9e00a04abe3..7c73c1e1cd8 100644
--- a/substrate/client/rpc/src/state/tests.rs
+++ b/substrate/client/rpc/src/state/tests.rs
@@ -522,7 +522,7 @@ async fn should_return_runtime_version() {
 
 	let runtime_version = api.runtime_version(None.into()).unwrap();
 	let serialized = serde_json::to_string(&runtime_version).unwrap();
-	assert_eq!(serialized, result);
+	pretty_assertions::assert_eq!(serialized, result);
 
 	let deserialized: RuntimeVersion = serde_json::from_str(result).unwrap();
 	assert_eq!(deserialized, runtime_version);
diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml
index b77a6ef58e6..4f280503fbb 100644
--- a/substrate/client/service/Cargo.toml
+++ b/substrate/client/service/Cargo.toml
@@ -69,7 +69,6 @@ sc-rpc-spec-v2 = { version = "0.10.0-dev", path = "../rpc-spec-v2" }
 sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
 sc-informant = { version = "0.10.0-dev", path = "../informant" }
 sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" }
-sc-offchain = { version = "4.0.0-dev", path = "../offchain" }
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" }
 sc-tracing = { version = "4.0.0-dev", path = "../tracing" }
 sc-sysinfo = { version = "6.0.0-dev", path = "../sysinfo" }
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index 166e1b87855..6b537e7ee48 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -187,9 +187,7 @@ where
 
 	let client = {
 		let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new(
-			config.execution_strategies.clone(),
-			Some(keystore_container.keystore()),
-			sc_offchain::OffchainDb::factory_from_backend(&*backend),
+			None,
 			Arc::new(executor.clone()),
 		);
 
@@ -322,19 +320,14 @@ where
 
 /// Shared network instance implementing a set of mandatory traits.
 pub trait SpawnTaskNetwork<Block: BlockT>:
-	sc_offchain::NetworkProvider + NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static
+	NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static
 {
 }
 
 impl<T, Block> SpawnTaskNetwork<Block> for T
 where
 	Block: BlockT,
-	T: sc_offchain::NetworkProvider
-		+ NetworkStateInfo
-		+ NetworkStatusProvider
-		+ Send
-		+ Sync
-		+ 'static,
+	T: NetworkStateInfo + NetworkStatusProvider + Send + Sync + 'static,
 {
 }
 
@@ -368,38 +361,6 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> {
 	pub telemetry: Option<&'a mut Telemetry>,
 }
 
-/// Build a shared offchain workers instance.
-pub fn build_offchain_workers<TBl, TCl>(
-	config: &Configuration,
-	spawn_handle: SpawnTaskHandle,
-	client: Arc<TCl>,
-	network: Arc<dyn sc_offchain::NetworkProvider + Send + Sync>,
-) -> Option<Arc<sc_offchain::OffchainWorkers<TCl, TBl>>>
-where
-	TBl: BlockT,
-	TCl: Send + Sync + ProvideRuntimeApi<TBl> + BlockchainEvents<TBl> + 'static,
-	<TCl as ProvideRuntimeApi<TBl>>::Api: sc_offchain::OffchainWorkerApi<TBl>,
-{
-	let offchain_workers = Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone())));
-
-	// Inform the offchain worker about new imported blocks
-	if let Some(offchain) = offchain_workers.clone() {
-		spawn_handle.spawn(
-			"offchain-notifications",
-			Some("offchain-worker"),
-			sc_offchain::notification_future(
-				config.role.is_authority(),
-				client,
-				offchain,
-				Clone::clone(&spawn_handle),
-				network,
-			),
-		);
-	}
-
-	offchain_workers
-}
-
 /// Spawn the tasks that are required to run a node.
 pub fn spawn_tasks<TBl, TBackend, TExPool, TRpc, TCl>(
 	params: SpawnTasksParams<TBl, TCl, TExPool, TRpc, TBackend>,
@@ -420,7 +381,6 @@ where
 		+ Send
 		+ 'static,
 	<TCl as ProvideRuntimeApi<TBl>>::Api: sp_api::Metadata<TBl>
-		+ sc_offchain::OffchainWorkerApi<TBl>
 		+ sp_transaction_pool::runtime_api::TaggedTransactionQueue<TBl>
 		+ sp_session::SessionKeys<TBl>
 		+ sp_api::ApiExt<TBl, StateBackend = TBackend::State>,
@@ -451,6 +411,7 @@ where
 		client.clone(),
 		chain_info.best_hash,
 		config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(),
+		keystore.clone(),
 	)
 	.map_err(|e| Error::Application(Box::new(e)))?;
 
diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs
index 7f83d62874c..facde72321d 100644
--- a/substrate/client/service/src/client/call_executor.rs
+++ b/substrate/client/service/src/client/call_executor.rs
@@ -22,14 +22,10 @@ use sc_client_api::{
 };
 use sc_executor::{RuntimeVersion, RuntimeVersionOf};
 use sp_api::{ProofRecorder, StorageTransactionCache};
-use sp_core::{
-	traits::{CallContext, CodeExecutor, RuntimeCode},
-	ExecutionContext,
-};
+use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode};
+use sp_externalities::Extensions;
 use sp_runtime::{generic::BlockId, traits::Block as BlockT};
-use sp_state_machine::{
-	backend::AsTrieBackend, ExecutionStrategy, Ext, OverlayedChanges, StateMachine, StorageProof,
-};
+use sp_state_machine::{backend::AsTrieBackend, Ext, OverlayedChanges, StateMachine, StorageProof};
 use std::{cell::RefCell, sync::Arc};
 
 /// Call executor that executes methods locally, querying all required
@@ -166,7 +162,6 @@ where
 		at_hash: Block::Hash,
 		method: &str,
 		call_data: &[u8],
-		strategy: ExecutionStrategy,
 		context: CallContext,
 	) -> sp_blockchain::Result<Vec<u8>> {
 		let mut changes = OverlayedChanges::default();
@@ -180,11 +175,7 @@ where
 
 		let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0;
 
-		let extensions = self.execution_extensions.extensions(
-			at_hash,
-			at_number,
-			ExecutionContext::OffchainCall(None),
-		);
+		let mut extensions = self.execution_extensions.extensions(at_hash, at_number);
 
 		let mut sm = StateMachine::new(
 			&state,
@@ -192,14 +183,13 @@ where
 			&self.executor,
 			method,
 			call_data,
-			extensions,
+			&mut extensions,
 			&runtime_code,
 			context,
 		)
 		.set_parent_hash(at_hash);
 
-		sm.execute_using_consensus_failure_handler(strategy.get_manager())
-			.map_err(Into::into)
+		sm.execute().map_err(Into::into)
 	}
 
 	fn contextual_call(
@@ -210,22 +200,13 @@ where
 		changes: &RefCell<OverlayedChanges>,
 		storage_transaction_cache: Option<&RefCell<StorageTransactionCache<Block, B::State>>>,
 		recorder: &Option<ProofRecorder<Block>>,
-		context: ExecutionContext,
+		call_context: CallContext,
+		extensions: &RefCell<Extensions>,
 	) -> Result<Vec<u8>, sp_blockchain::Error> {
 		let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut());
 
-		let at_number =
-			self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(at_hash))?;
 		let state = self.backend.state_at(at_hash)?;
 
-		let call_context = match context {
-			ExecutionContext::OffchainCall(_) => CallContext::Offchain,
-			_ => CallContext::Onchain,
-		};
-
-		let (execution_manager, extensions) =
-			self.execution_extensions.manager_and_extensions(at_hash, at_number, context);
-
 		let changes = &mut *changes.borrow_mut();
 
 		// It is important to extract the runtime code here before we create the proof
@@ -236,6 +217,7 @@ where
 		let runtime_code =
 			state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?;
 		let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0;
+		let mut extensions = extensions.borrow_mut();
 
 		match recorder {
 			Some(recorder) => {
@@ -251,13 +233,13 @@ where
 					&self.executor,
 					method,
 					call_data,
-					extensions,
+					&mut extensions,
 					&runtime_code,
 					call_context,
 				)
 				.with_storage_transaction_cache(storage_transaction_cache.as_deref_mut())
 				.set_parent_hash(at_hash);
-				state_machine.execute_using_consensus_failure_handler(execution_manager)
+				state_machine.execute()
 			},
 			None => {
 				let mut state_machine = StateMachine::new(
@@ -266,13 +248,13 @@ where
 					&self.executor,
 					method,
 					call_data,
-					extensions,
+					&mut extensions,
 					&runtime_code,
 					call_context,
 				)
 				.with_storage_transaction_cache(storage_transaction_cache.as_deref_mut())
 				.set_parent_hash(at_hash);
-				state_machine.execute_using_consensus_failure_handler(execution_manager)
+				state_machine.execute()
 			},
 		}
 		.map_err(Into::into)
@@ -311,11 +293,7 @@ where
 			method,
 			call_data,
 			&runtime_code,
-			self.execution_extensions.extensions(
-				at_hash,
-				at_number,
-				ExecutionContext::OffchainCall(None),
-			),
+			&mut self.execution_extensions.extensions(at_hash, at_number),
 		)
 		.map_err(Into::into)
 	}
@@ -411,7 +389,6 @@ mod tests {
 				backend.clone(),
 				executor.clone(),
 				genesis_block_builder,
-				None,
 				Box::new(TaskExecutor::new()),
 				None,
 				None,
@@ -430,8 +407,6 @@ mod tests {
 			)
 			.unwrap(),
 			execution_extensions: Arc::new(ExecutionExtensions::new(
-				Default::default(),
-				None,
 				None,
 				Arc::new(executor.clone()),
 			)),
@@ -486,7 +461,6 @@ mod tests {
 				backend.clone(),
 				executor.clone(),
 				genesis_block_builder,
-				None,
 				Box::new(TaskExecutor::new()),
 				None,
 				None,
diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs
index 3b1a526154d..8c4b14fbddc 100644
--- a/substrate/client/service/src/client/client.rs
+++ b/substrate/client/service/src/client/client.rs
@@ -62,10 +62,8 @@ use sp_core::{
 		well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData,
 		StorageKey,
 	},
-	traits::SpawnNamed,
+	traits::{CallContext, SpawnNamed},
 };
-#[cfg(feature = "test-helpers")]
-use sp_keystore::KeystorePtr;
 use sp_runtime::{
 	generic::{BlockId, SignedBlock},
 	traits::{
@@ -161,7 +159,6 @@ pub fn new_in_mem<E, Block, G, RA>(
 	backend: Arc<in_mem::Backend<Block>>,
 	executor: E,
 	genesis_block_builder: G,
-	keystore: Option<KeystorePtr>,
 	prometheus_registry: Option<Registry>,
 	telemetry: Option<TelemetryHandle>,
 	spawn_handle: Box<dyn SpawnNamed>,
@@ -181,7 +178,6 @@ where
 		backend,
 		executor,
 		genesis_block_builder,
-		keystore,
 		spawn_handle,
 		prometheus_registry,
 		telemetry,
@@ -224,7 +220,6 @@ pub fn new_with_backend<B, E, Block, G, RA>(
 	backend: Arc<B>,
 	executor: E,
 	genesis_block_builder: G,
-	keystore: Option<KeystorePtr>,
 	spawn_handle: Box<dyn SpawnNamed>,
 	prometheus_registry: Option<Registry>,
 	telemetry: Option<TelemetryHandle>,
@@ -239,12 +234,7 @@ where
 	Block: BlockT,
 	B: backend::LocalBackend<Block> + 'static,
 {
-	let extensions = ExecutionExtensions::new(
-		Default::default(),
-		keystore,
-		sc_offchain::OffchainDb::factory_from_backend(&*backend),
-		Arc::new(executor.clone()),
-	);
+	let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone()));
 
 	let call_executor =
 		LocalCallExecutor::new(backend.clone(), executor, config.clone(), extensions)?;
@@ -875,12 +865,12 @@ where
 			// We should enact state, but don't have any storage changes, so we need to execute the
 			// block.
 			(true, None, Some(ref body)) => {
-				let runtime_api = self.runtime_api();
-				let execution_context = import_block.origin.into();
+				let mut runtime_api = self.runtime_api();
+
+				runtime_api.set_call_context(CallContext::Onchain);
 
-				runtime_api.execute_block_with_context(
+				runtime_api.execute_block(
 					*parent_hash,
-					execution_context,
 					Block::new(import_block.header.clone(), body.clone()),
 				)?;
 
@@ -1727,7 +1717,8 @@ where
 				params.overlayed_changes,
 				Some(params.storage_transaction_cache),
 				params.recorder,
-				params.context,
+				params.call_context,
+				params.extensions,
 			)
 			.map_err(Into::into)
 	}
@@ -1739,6 +1730,18 @@ where
 	fn state_at(&self, at: Block::Hash) -> Result<Self::StateBackend, sp_api::ApiError> {
 		self.state_at(at).map_err(Into::into)
 	}
+
+	fn initialize_extensions(
+		&self,
+		at: Block::Hash,
+		extensions: &mut sp_externalities::Extensions,
+	) -> Result<(), sp_api::ApiError> {
+		let block_number = self.expect_block_number_from_id(&BlockId::Hash(at))?;
+
+		extensions.merge(self.executor.execution_extensions().extensions(at, block_number));
+
+		Ok(())
+	}
 }
 
 /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport
diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs
index 52e17c95e67..39b7ee05079 100644
--- a/substrate/client/service/src/config.rs
+++ b/substrate/client/service/src/config.rs
@@ -18,7 +18,6 @@
 
 //! Service configuration.
 
-pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy};
 pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode};
 pub use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy};
 pub use sc_network::{
@@ -81,8 +80,6 @@ pub struct Configuration {
 	/// over on-chain runtimes when the spec version matches. Set to `None` to
 	/// disable overrides (default).
 	pub wasm_runtime_overrides: Option<PathBuf>,
-	/// Execution strategies.
-	pub execution_strategies: ExecutionStrategies,
 	/// JSON-RPC server binding address.
 	pub rpc_addr: Option<SocketAddr>,
 	/// Maximum number of connections for JSON-RPC server.
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index 4a896ecc872..c987c247190 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -55,8 +55,8 @@ use sp_runtime::{
 
 pub use self::{
 	builder::{
-		build_network, build_offchain_workers, new_client, new_db_backend, new_full_client,
-		new_full_parts, new_full_parts_with_genesis_builder, new_native_or_wasm_executor,
+		build_network, new_client, new_db_backend, new_full_client, new_full_parts,
+		new_full_parts_with_genesis_builder, new_native_or_wasm_executor, new_wasm_executor,
 		spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams,
 		TFullBackend, TFullCallExecutor, TFullClient,
 	},
diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs
index 9c490a47a3b..c40ac33da4b 100644
--- a/substrate/client/service/test/src/client/mod.rs
+++ b/substrate/client/service/test/src/client/mod.rs
@@ -37,9 +37,7 @@ use sp_runtime::{
 	traits::{BlakeTwo256, Block as BlockT, Header as HeaderT},
 	ConsensusEngineId, Justifications, StateVersion,
 };
-use sp_state_machine::{
-	backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine,
-};
+use sp_state_machine::{backend::Backend as _, InMemoryBackend, OverlayedChanges, StateMachine};
 use sp_storage::{ChildInfo, StorageKey};
 use sp_trie::{LayoutV0, TrieConfiguration};
 use std::{collections::HashSet, sync::Arc};
@@ -90,11 +88,11 @@ fn construct_block(
 		&new_native_or_wasm_executor(),
 		"Core_initialize_block",
 		&header.encode(),
-		Default::default(),
+		&mut Default::default(),
 		&runtime_code,
 		CallContext::Onchain,
 	)
-	.execute(ExecutionStrategy::NativeElseWasm)
+	.execute()
 	.unwrap();
 
 	for tx in transactions.iter() {
@@ -104,11 +102,11 @@ fn construct_block(
 			&new_native_or_wasm_executor(),
 			"BlockBuilder_apply_extrinsic",
 			&tx.encode(),
-			Default::default(),
+			&mut Default::default(),
 			&runtime_code,
 			CallContext::Onchain,
 		)
-		.execute(ExecutionStrategy::NativeElseWasm)
+		.execute()
 		.unwrap();
 	}
 
@@ -118,11 +116,11 @@ fn construct_block(
 		&new_native_or_wasm_executor(),
 		"BlockBuilder_finalize_block",
 		&[],
-		Default::default(),
+		&mut Default::default(),
 		&runtime_code,
 		CallContext::Onchain,
 	)
-	.execute(ExecutionStrategy::NativeElseWasm)
+	.execute()
 	.unwrap();
 	header = Header::decode(&mut &ret_data[..]).unwrap();
 
@@ -189,11 +187,11 @@ fn construct_genesis_should_work_with_native() {
 		&new_native_or_wasm_executor(),
 		"Core_execute_block",
 		&b1data,
-		Default::default(),
+		&mut Default::default(),
 		&runtime_code,
 		CallContext::Onchain,
 	)
-	.execute(ExecutionStrategy::NativeElseWasm)
+	.execute()
 	.unwrap();
 }
 
@@ -220,11 +218,11 @@ fn construct_genesis_should_work_with_wasm() {
 		&new_native_or_wasm_executor(),
 		"Core_execute_block",
 		&b1data,
-		Default::default(),
+		&mut Default::default(),
 		&runtime_code,
 		CallContext::Onchain,
 	)
-	.execute(ExecutionStrategy::AlwaysWasm)
+	.execute()
 	.unwrap();
 }
 
@@ -1670,22 +1668,21 @@ fn storage_keys_prefix_and_start_key_works() {
 
 	let block_hash = client.info().best_hash;
 
-	let child_root = b":child_storage:default:child".to_vec();
+	let child_root = array_bytes::bytes2hex("", b":child_storage:default:child");
 	let prefix = StorageKey(array_bytes::hex2bytes_unchecked("3a"));
 	let child_prefix = StorageKey(b"sec".to_vec());
 
 	let res: Vec<_> = client
 		.storage_keys(block_hash, Some(&prefix), None)
 		.unwrap()
-		.map(|x| x.0)
+		.map(|x| array_bytes::bytes2hex("", &x.0))
 		.collect();
 	assert_eq!(
 		res,
 		[
-			child_root.clone(),
-			array_bytes::hex2bytes_unchecked("3a636f6465"), //":code"
-			array_bytes::hex2bytes_unchecked("3a65787472696e7369635f696e646578"), //":extrinsic_index"
-			array_bytes::hex2bytes_unchecked("3a686561707061676573"), //":heappages"
+			&child_root,
+			"3a636f6465",                       //":code"
+			"3a65787472696e7369635f696e646578", //":extrinsic_index"
 		]
 	);
 
@@ -1696,15 +1693,9 @@ fn storage_keys_prefix_and_start_key_works() {
 			Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))),
 		)
 		.unwrap()
-		.map(|x| x.0)
+		.map(|x| array_bytes::bytes2hex("", &x.0))
 		.collect();
-	assert_eq!(
-		res,
-		[
-			array_bytes::hex2bytes_unchecked("3a65787472696e7369635f696e646578"),
-			array_bytes::hex2bytes_unchecked("3a686561707061676573")
-		]
-	);
+	assert_eq!(res, ["3a65787472696e7369635f696e646578",]);
 
 	let res: Vec<_> = client
 		.storage_keys(
@@ -1737,7 +1728,7 @@ fn storage_keys_works() {
 	sp_tracing::try_init_simple();
 
 	let expected_keys =
-		substrate_test_runtime::storage_key_generator::get_expected_storage_hashed_keys();
+		substrate_test_runtime::storage_key_generator::get_expected_storage_hashed_keys(false);
 
 	let client = substrate_test_runtime_client::new();
 	let block_hash = client.info().best_hash;
@@ -1776,10 +1767,10 @@ fn storage_keys_works() {
 		res,
 		expected_keys
 			.iter()
-			.filter(|&i| i > &"3a636f64".to_string())
+			.filter(|&i| *i > "3a636f64")
 			.take(8)
 			.cloned()
-			.collect::<Vec<String>>()
+			.collect::<Vec<_>>()
 	);
 
 	// Starting at a complete key the first key is skipped.
@@ -1797,10 +1788,10 @@ fn storage_keys_works() {
 		res,
 		expected_keys
 			.iter()
-			.filter(|&i| i > &"3a636f6465".to_string())
+			.filter(|&i| *i > "3a636f6465")
 			.take(8)
 			.cloned()
-			.collect::<Vec<String>>()
+			.collect::<Vec<_>>()
 	);
 
 	const SOME_BALANCE_KEY : &str = "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9e2c1dc507e2035edbbd8776c440d870460c57f0008067cc01c5ff9eb2e2f9b3a94299a915a91198bd1021a6c55596f57";
@@ -1818,10 +1809,10 @@ fn storage_keys_works() {
 		res,
 		expected_keys
 			.iter()
-			.filter(|&i| i > &SOME_BALANCE_KEY.to_string())
+			.filter(|&i| *i > SOME_BALANCE_KEY)
 			.take(8)
 			.cloned()
-			.collect::<Vec<String>>()
+			.collect::<Vec<_>>()
 	);
 }
 
@@ -1850,7 +1841,6 @@ fn cleans_up_closed_notification_sinks_on_block_import() {
 		genesis_block_builder,
 		None,
 		None,
-		None,
 		Box::new(TaskExecutor::new()),
 		client_config,
 	)
diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs
index 11c672db8cb..38a811acc74 100644
--- a/substrate/client/service/test/src/lib.rs
+++ b/substrate/client/service/test/src/lib.rs
@@ -245,7 +245,6 @@ fn node_config<
 		chain_spec: Box::new((*spec).clone()),
 		wasm_method: Default::default(),
 		wasm_runtime_overrides: Default::default(),
-		execution_strategies: Default::default(),
 		rpc_addr: Default::default(),
 		rpc_max_connections: Default::default(),
 		rpc_cors: None,
diff --git a/substrate/client/statement-store/src/lib.rs b/substrate/client/statement-store/src/lib.rs
index 4acb89a05f7..3b42641d9c5 100644
--- a/substrate/client/statement-store/src/lib.rs
+++ b/substrate/client/statement-store/src/lib.rs
@@ -59,7 +59,9 @@ use sp_blockchain::HeaderBackend;
 use sp_core::{hexdisplay::HexDisplay, traits::SpawnNamed, Decode, Encode};
 use sp_runtime::traits::Block as BlockT;
 use sp_statement_store::{
-	runtime_api::{InvalidStatement, StatementSource, ValidStatement, ValidateStatement},
+	runtime_api::{
+		InvalidStatement, StatementSource, StatementStoreExt, ValidStatement, ValidateStatement,
+	},
 	AccountId, BlockHash, Channel, DecryptionKey, Hash, NetworkPriority, Proof, Result, Statement,
 	SubmitResult, Topic,
 };
@@ -491,8 +493,7 @@ impl Store {
 			+ 'static,
 		Client::Api: ValidateStatement<Block>,
 	{
-		let store = Arc::new(Self::new(path, options, client.clone(), prometheus)?);
-		client.execution_extensions().register_statement_store(store.clone());
+		let store = Arc::new(Self::new(path, options, client, prometheus)?);
 
 		// Perform periodic statement store maintenance
 		let worker_store = store.clone();
@@ -696,6 +697,11 @@ impl Store {
 	fn set_time(&mut self, time: u64) {
 		self.time_override = Some(time);
 	}
+
+	/// Returns `self` as [`StatementStoreExt`].
+	pub fn as_statement_store_ext(self: Arc<Self>) -> StatementStoreExt {
+		StatementStoreExt::new(self)
+	}
 }
 
 impl StatementStore for Store {
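With `register_statement_store` gone from `ExecutionExtensions`, the store is expected to be handed to the runtime per API instance via the new `as_statement_store_ext` helper. A hedged sketch of that usage (function and variable names are illustrative):

```rust
use sp_api::{ApiExt, ProvideRuntimeApi};
use sp_runtime::traits::Block as BlockT;
use std::sync::Arc;

// Sketch: `store` and `client` are assumed to come from the service setup.
fn register_store<Block, C>(client: &C, store: Arc<sc_statement_store::Store>)
where
	Block: BlockT,
	C: ProvideRuntimeApi<Block>,
{
	let mut runtime_api = client.runtime_api();
	// `as_statement_store_ext` (added above) wraps the store in the
	// `StatementStoreExt` extension type expected by the runtime.
	runtime_api.register_extension(store.as_statement_store_ext());
}
```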
diff --git a/substrate/client/transaction-pool/api/src/lib.rs b/substrate/client/transaction-pool/api/src/lib.rs
index e7b3a9c5e16..32fe30f4584 100644
--- a/substrate/client/transaction-pool/api/src/lib.rs
+++ b/substrate/client/transaction-pool/api/src/lib.rs
@@ -29,12 +29,7 @@ use sp_runtime::{
 	generic::BlockId,
 	traits::{Block as BlockT, Member, NumberFor},
 };
-use std::{
-	collections::HashMap,
-	hash::Hash,
-	pin::Pin,
-	sync::{Arc, Weak},
-};
+use std::{collections::HashMap, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc};
 
 const LOG_TARGET: &str = "txpool::api";
 
@@ -354,6 +349,22 @@ pub trait LocalTransactionPool: Send + Sync {
 	) -> Result<Self::Hash, Self::Error>;
 }
 
+impl<T: LocalTransactionPool> LocalTransactionPool for Arc<T> {
+	type Block = T::Block;
+
+	type Hash = T::Hash;
+
+	type Error = T::Error;
+
+	fn submit_local(
+		&self,
+		at: <Self::Block as BlockT>::Hash,
+		xt: LocalTransactionFor<Self>,
+	) -> Result<Self::Hash, Self::Error> {
+		(**self).submit_local(at, xt)
+	}
+}
+
 /// An abstraction for [`LocalTransactionPool`]
 ///
 /// We want to use a transaction pool in [`OffchainTransactionPoolFactory`] in a `Arc` without
@@ -396,15 +407,13 @@ impl<TPool: LocalTransactionPool> OffchainSubmitTransaction<TPool::Block> for TP
 /// the wasm execution environment to send transactions from an offchain call to the  runtime.
 #[derive(Clone)]
 pub struct OffchainTransactionPoolFactory<Block: BlockT> {
-	// To break retain cycle between `Client` and `TransactionPool` we require this
-	// extension to be a `Weak` reference.
-	pool: Weak<dyn OffchainSubmitTransaction<Block>>,
+	pool: Arc<dyn OffchainSubmitTransaction<Block>>,
 }
 
 impl<Block: BlockT> OffchainTransactionPoolFactory<Block> {
 	/// Creates a new instance using the given `tx_pool`.
-	pub fn new<T: LocalTransactionPool<Block = Block> + 'static>(tx_pool: &Arc<T>) -> Self {
-		Self { pool: Arc::downgrade(tx_pool) as Weak<_> }
+	pub fn new<T: LocalTransactionPool<Block = Block> + 'static>(tx_pool: T) -> Self {
+		Self { pool: Arc::new(tx_pool) as Arc<_> }
 	}
 
 	/// Returns an instance of [`TransactionPoolExt`] bound to the given `block_hash`.
@@ -419,7 +428,7 @@ impl<Block: BlockT> OffchainTransactionPoolFactory<Block> {
 /// Wraps a `pool` and `block_hash` to implement [`sp_core::offchain::TransactionPool`].
 struct OffchainTransactionPool<Block: BlockT> {
 	block_hash: Block::Hash,
-	pool: Weak<dyn OffchainSubmitTransaction<Block>>,
+	pool: Arc<dyn OffchainSubmitTransaction<Block>>,
 }
 
 impl<Block: BlockT> sp_core::offchain::TransactionPool for OffchainTransactionPool<Block> {
@@ -436,7 +445,7 @@ impl<Block: BlockT> sp_core::offchain::TransactionPool for OffchainTransactionPo
 			},
 		};
 
-		self.pool.upgrade().ok_or(())?.submit_at(self.block_hash, extrinsic)
+		self.pool.submit_at(self.block_hash, extrinsic)
 	}
 }
 
@@ -463,6 +472,29 @@ mod v1_compatible {
 	}
 }
 
+/// Transaction pool that rejects all submitted transactions.
+///
+/// Can be used in tests, for example.
+pub struct RejectAllTxPool<Block>(PhantomData<Block>);
+
+impl<Block> Default for RejectAllTxPool<Block> {
+	fn default() -> Self {
+		Self(PhantomData)
+	}
+}
+
+impl<Block: BlockT> LocalTransactionPool for RejectAllTxPool<Block> {
+	type Block = Block;
+
+	type Hash = Block::Hash;
+
+	type Error = error::Error;
+
+	fn submit_local(&self, _: Block::Hash, _: Block::Extrinsic) -> Result<Self::Hash, Self::Error> {
+		Err(error::Error::ImmediatelyDropped)
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
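Because the factory now holds an `Arc` instead of a `Weak`, it can be constructed directly from any `LocalTransactionPool` value, including the new `RejectAllTxPool`. A sketch under that assumption; the `offchain_transaction_pool` accessor name is assumed from the surrounding API and is not visible in this hunk:

```rust
use sc_transaction_pool_api::{OffchainTransactionPoolFactory, RejectAllTxPool};
use sp_runtime::traits::Block as BlockT;

// Sketch: build a factory around the always-rejecting test pool and derive a
// `TransactionPoolExt` bound to a given block hash.
fn test_factory<Block: BlockT>(block_hash: Block::Hash) -> sp_core::offchain::TransactionPoolExt {
	let factory = OffchainTransactionPoolFactory::new(RejectAllTxPool::<Block>::default());
	// Assumed accessor name; the hunk only shows its doc comment
	// ("Returns an instance of `TransactionPoolExt` bound to the given `block_hash`").
	factory.offchain_transaction_pool(block_hash)
}
```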
diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs
index 1b438bd7e4f..80e5925194c 100644
--- a/substrate/client/transaction-pool/src/lib.rs
+++ b/substrate/client/transaction-pool/src/lib.rs
@@ -52,8 +52,8 @@ use std::{
 use graph::{ExtrinsicHash, IsValidator};
 use sc_transaction_pool_api::{
 	error::Error as TxPoolError, ChainEvent, ImportNotificationStream, MaintainedTransactionPool,
-	OffchainTransactionPoolFactory, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor,
-	TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash,
+	PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, TransactionPool, TransactionSource,
+	TransactionStatusStreamFor, TxHash,
 };
 use sp_core::traits::SpawnEssentialNamed;
 use sp_runtime::{
@@ -396,11 +396,6 @@ where
 			client.usage_info().chain.finalized_hash,
 		));
 
-		// make transaction pool available for off-chain runtime calls.
-		client
-			.execution_extensions()
-			.register_transaction_pool_factory(OffchainTransactionPoolFactory::new(&pool));
-
 		pool
 	}
 }
diff --git a/substrate/frame/benchmarking/README.md b/substrate/frame/benchmarking/README.md
index 76673c5f69b..dc6a184435d 100644
--- a/substrate/frame/benchmarking/README.md
+++ b/substrate/frame/benchmarking/README.md
@@ -175,7 +175,6 @@ Then you can run a benchmark like so:
 ```bash
 ./target/production/substrate benchmark pallet \
     --chain dev \                  # Configurable Chain Spec
-    --execution=wasm \             # Always test with Wasm
     --wasm-execution=compiled \    # Always used `wasm-time`
     --pallet pallet_balances \     # Select the pallet
     --extrinsic transfer \         # Select the extrinsic
diff --git a/substrate/frame/nfts/src/weights.rs b/substrate/frame/nfts/src/weights.rs
index 686fabb2a70..6b8c577bb12 100644
--- a/substrate/frame/nfts/src/weights.rs
+++ b/substrate/frame/nfts/src/weights.rs
@@ -35,7 +35,6 @@
 // --no-median-slopes
 // --no-min-squares
 // --extrinsic=*
-// --execution=wasm
 // --wasm-execution=compiled
 // --heap-pages=4096
 // --output=./frame/nfts/src/weights.rs
diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml
index ded866ed481..c6d197b8d35 100644
--- a/substrate/primitives/api/Cargo.toml
+++ b/substrate/primitives/api/Cargo.toml
@@ -18,6 +18,7 @@ sp-api-proc-macro = { version = "4.0.0-dev", path = "proc-macro" }
 sp-core = { version = "21.0.0", default-features = false, path = "../core" }
 sp-std = { version = "8.0.0", default-features = false, path = "../std" }
 sp-runtime = { version = "24.0.0", default-features = false, path = "../runtime" }
+sp-externalities = { version = "0.19.0", default-features = false, optional = true, path = "../externalities" }
 sp-version = { version = "22.0.0", default-features = false, path = "../version" }
 sp-state-machine = { version = "0.28.0", default-features = false, optional = true, path = "../state-machine" }
 sp-trie = { version = "22.0.0", default-features = false, optional = true, path = "../trie" }
@@ -35,6 +36,7 @@ default = ["std"]
 std = [
 	"codec/std",
 	"sp-core/std",
+	"sp-externalities",
 	"sp-std/std",
 	"sp-runtime/std",
 	"sp-state-machine/std",
diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs
index 052dc896f2c..370735819f9 100644
--- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs
+++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs
@@ -315,7 +315,6 @@ impl<'a> ToClientSideDecl<'a> {
 			fn __runtime_api_internal_call_api_at(
 				&self,
 				at: #block_hash,
-				context: #crate_::ExecutionContext,
 				params: std::vec::Vec<u8>,
 				fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str,
 			) -> std::result::Result<std::vec::Vec<u8>, #crate_::ApiError>;
@@ -335,9 +334,8 @@ impl<'a> ToClientSideDecl<'a> {
 
 		items.into_iter().for_each(|i| match i {
 			TraitItem::Fn(method) => {
-				let (fn_decl, fn_decl_ctx) = self.fold_trait_item_fn(method, trait_generics_num);
+				let fn_decl = self.create_method_decl(method, trait_generics_num);
 				result.push(fn_decl.into());
-				result.push(fn_decl_ctx.into());
 			},
 			r => result.push(r),
 		});
@@ -345,41 +343,12 @@ impl<'a> ToClientSideDecl<'a> {
 		result
 	}
 
-	fn fold_trait_item_fn(
-		&mut self,
-		method: TraitItemFn,
-		trait_generics_num: usize,
-	) -> (TraitItemFn, TraitItemFn) {
-		let crate_ = self.crate_;
-		let context = quote!( #crate_::ExecutionContext::OffchainCall(None) );
-		let fn_decl = self.create_method_decl(method.clone(), context, trait_generics_num);
-		let fn_decl_ctx = self.create_method_decl_with_context(method, trait_generics_num);
-
-		(fn_decl, fn_decl_ctx)
-	}
-
-	fn create_method_decl_with_context(
-		&mut self,
-		method: TraitItemFn,
-		trait_generics_num: usize,
-	) -> TraitItemFn {
-		let crate_ = self.crate_;
-		let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext );
-		let mut fn_decl_ctx = self.create_method_decl(method, quote!(context), trait_generics_num);
-		fn_decl_ctx.sig.ident =
-			Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site());
-		fn_decl_ctx.sig.inputs.insert(2, context_arg);
-
-		fn_decl_ctx
-	}
-
 	/// Takes the method declared by the user and creates the declaration we require for the runtime
 	/// api client side. This method will call by default the `method_runtime_api_impl` for doing
 	/// the actual call into the runtime.
 	fn create_method_decl(
 		&mut self,
 		mut method: TraitItemFn,
-		context: TokenStream,
 		trait_generics_num: usize,
 	) -> TraitItemFn {
 		let params = match extract_parameter_names_types_and_borrows(
@@ -467,7 +436,6 @@ impl<'a> ToClientSideDecl<'a> {
 				<Self as #trait_name<#( #underscores ),*>>::__runtime_api_internal_call_api_at(
 					self,
 					__runtime_api_at_param__,
-					#context,
 					__runtime_api_impl_params_encoded__,
 					&|_version| {
 						#(
diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
index 028cc6a675e..e45f05029d9 100644
--- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
+++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs
@@ -224,8 +224,8 @@ fn generate_runtime_api_base_structures() -> Result<TokenStream> {
 
 	Ok(quote!(
 		pub struct RuntimeApi {}
-		/// Implements all runtime apis for the client side.
 		#crate_::std_enabled! {
+			/// Implements all runtime apis for the client side.
 			pub struct RuntimeApiImpl<Block: #crate_::BlockT, C: #crate_::CallApiAt<Block> + 'static> {
 				call: &'static C,
 				transaction_depth: std::cell::RefCell<u16>,
@@ -234,6 +234,9 @@ fn generate_runtime_api_base_structures() -> Result<TokenStream> {
 					#crate_::StorageTransactionCache<Block, C::StateBackend>
 				>,
 				recorder: std::option::Option<#crate_::ProofRecorder<Block>>,
+				call_context: #crate_::CallContext,
+				extensions: std::cell::RefCell<#crate_::Extensions>,
+				extensions_generated_for: std::cell::RefCell<std::option::Option<Block::Hash>>,
 			}
 
 			impl<Block: #crate_::BlockT, C: #crate_::CallApiAt<Block>> #crate_::ApiExt<Block> for
@@ -321,6 +324,14 @@ fn generate_runtime_api_base_structures() -> Result<TokenStream> {
 							state_version,
 						)
 					}
+
+				fn set_call_context(&mut self, call_context: #crate_::CallContext) {
+					self.call_context = call_context;
+				}
+
+				fn register_extension<E: #crate_::Extension>(&mut self, extension: E) {
+					std::cell::RefCell::borrow_mut(&self.extensions).register(extension);
+				}
 			}
 
 			impl<Block: #crate_::BlockT, C> #crate_::ConstructRuntimeApi<Block, C>
@@ -339,6 +350,9 @@ fn generate_runtime_api_base_structures() -> Result<TokenStream> {
 						changes: std::default::Default::default(),
 						recorder: std::default::Default::default(),
 						storage_transaction_cache: std::default::Default::default(),
+						call_context: #crate_::CallContext::Offchain,
+						extensions: std::default::Default::default(),
+						extensions_generated_for: std::default::Default::default(),
 					}.into()
 				}
 			}
@@ -480,7 +494,6 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> {
 			fn __runtime_api_internal_call_api_at(
 				&self,
 				at: <__SrApiBlock__ as #crate_::BlockT>::Hash,
-				context: #crate_::ExecutionContext,
 				params: std::vec::Vec<u8>,
 				fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str,
 			) -> std::result::Result<std::vec::Vec<u8>, #crate_::ApiError> {
@@ -498,14 +511,34 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> {
 						at,
 					)?;
 
+					match &mut *std::cell::RefCell::borrow_mut(&self.extensions_generated_for) {
+						Some(generated_for) => {
+							if *generated_for != at {
+								return std::result::Result::Err(
+									#crate_::ApiError::UsingSameInstanceForDifferentBlocks
+								)
+							}
+						},
+						generated_for @ None => {
+							#crate_::CallApiAt::<__SrApiBlock__>::initialize_extensions(
+								self.call,
+								at,
+								&mut std::cell::RefCell::borrow_mut(&self.extensions),
+							)?;
+
+							*generated_for = Some(at);
+						}
+					}
+
 					let params = #crate_::CallApiAtParams {
 						at,
 						function: (*fn_name)(version),
 						arguments: params,
 						overlayed_changes: &self.changes,
 						storage_transaction_cache: &self.storage_transaction_cache,
-						context,
+						call_context: self.call_context,
 						recorder: &self.recorder,
+						extensions: &self.extensions,
 					};
 
 					#crate_::CallApiAt::<__SrApiBlock__>::call_api_at(
diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
index be8c8ca0f85..f85be81cc7f 100644
--- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
+++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs
@@ -121,13 +121,20 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result<To
 			> where Self: Sized {
 				unimplemented!("`into_storage_changes` not implemented for runtime api mocks")
 			}
+
+			fn set_call_context(&mut self, _: #crate_::CallContext) {
+				unimplemented!("`set_call_context` not implemented for runtime api mocks")
+			}
+
+			fn register_extension<E: #crate_::Extension>(&mut self, _: E) {
+				unimplemented!("`register_extension` not implemented for runtime api mocks")
+			}
 		}
 
 		impl #crate_::Core<#block_type> for #self_ty {
 			fn __runtime_api_internal_call_api_at(
 				&self,
 				_: <#block_type as #crate_::BlockT>::Hash,
-				_: #crate_::ExecutionContext,
 				_: std::vec::Vec<u8>,
 				_: &dyn Fn(#crate_::RuntimeVersion) -> &'static str,
 			) -> std::result::Result<std::vec::Vec<u8>, #crate_::ApiError> {
@@ -141,14 +148,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result<To
 				unimplemented!("`Core::version` not implemented for runtime api mocks")
 			}
 
-			fn version_with_context(
-				&self,
-				_: <#block_type as #crate_::BlockT>::Hash,
-				_: #crate_::ExecutionContext,
-			) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> {
-				unimplemented!("`Core::version` not implemented for runtime api mocks")
-			}
-
 			fn execute_block(
 				&self,
 				_: <#block_type as #crate_::BlockT>::Hash,
@@ -157,15 +156,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result<To
 				unimplemented!("`Core::execute_block` not implemented for runtime api mocks")
 			}
 
-			fn execute_block_with_context(
-				&self,
-				_: <#block_type as #crate_::BlockT>::Hash,
-				_: #crate_::ExecutionContext,
-				_: #block_type,
-			) -> std::result::Result<(), #crate_::ApiError> {
-				unimplemented!("`Core::execute_block` not implemented for runtime api mocks")
-			}
-
 			fn initialize_block(
 				&self,
 				_: <#block_type as #crate_::BlockT>::Hash,
@@ -173,15 +163,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result<To
 			) -> std::result::Result<(), #crate_::ApiError> {
 				unimplemented!("`Core::initialize_block` not implemented for runtime api mocks")
 			}
-
-			fn initialize_block_with_context(
-				&self,
-				_: <#block_type as #crate_::BlockT>::Hash,
-				_: #crate_::ExecutionContext,
-				_: &<#block_type as #crate_::BlockT>::Header,
-			) -> std::result::Result<(), #crate_::ApiError> {
-				unimplemented!("`Core::initialize_block` not implemented for runtime api mocks")
-			}
 		}
 	))
 }
@@ -255,26 +236,12 @@ impl<'a> FoldRuntimeApiImpl<'a> {
 
 		let crate_ = generate_crate_access();
 
-		// We also need to overwrite all the `_with_context` methods. To do this,
-		// we clone all methods and add them again with the new name plus one more argument.
-		impl_item.items.extend(impl_item.items.clone().into_iter().filter_map(|i| {
-			if let syn::ImplItem::Fn(mut m) = i {
-				m.sig.ident = quote::format_ident!("{}_with_context", m.sig.ident);
-				m.sig.inputs.insert(2, parse_quote!( _: #crate_::ExecutionContext ));
-
-				Some(m.into())
-			} else {
-				None
-			}
-		}));
-
 		let block_type = self.block_type;
 
 		impl_item.items.push(parse_quote! {
 			fn __runtime_api_internal_call_api_at(
 				&self,
 				_: <#block_type as #crate_::BlockT>::Hash,
-				_: #crate_::ExecutionContext,
 				_: std::vec::Vec<u8>,
 				_: &dyn Fn(#crate_::RuntimeVersion) -> &'static str,
 			) -> std::result::Result<std::vec::Vec<u8>, #crate_::ApiError> {
diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs
index 78c4cd73a18..e683d7aa637 100644
--- a/substrate/primitives/api/src/lib.rs
+++ b/substrate/primitives/api/src/lib.rs
@@ -78,11 +78,17 @@ pub use hash_db::Hasher;
 #[doc(hidden)]
 pub use scale_info;
 #[doc(hidden)]
+pub use sp_core::offchain;
+#[doc(hidden)]
 #[cfg(not(feature = "std"))]
 pub use sp_core::to_substrate_wasm_fn_return_value;
+#[doc(hidden)]
+#[cfg(feature = "std")]
+pub use sp_core::traits::CallContext;
 use sp_core::OpaqueMetadata;
 #[doc(hidden)]
-pub use sp_core::{offchain, ExecutionContext};
+#[cfg(feature = "std")]
+pub use sp_externalities::{Extension, Extensions};
 #[doc(hidden)]
 #[cfg(feature = "frame-metadata")]
 pub use sp_metadata_ir::{self as metadata_ir, frame_metadata as metadata};
@@ -518,6 +524,8 @@ pub enum ApiError {
 	Application(#[from] Box<dyn std::error::Error + Send + Sync>),
 	#[error("Api called for an unknown Block: {0}")]
 	UnknownBlock(String),
+	#[error("Using the same api instance to call into multiple independent blocks.")]
+	UsingSameInstanceForDifferentBlocks,
 }
 
 /// Extends the runtime api implementation with some common functionality.
@@ -581,6 +589,12 @@ pub trait ApiExt<Block: BlockT> {
 	) -> Result<StorageChanges<Self::StateBackend, Block>, String>
 	where
 		Self: Sized;
+
+	/// Set the [`CallContext`] to be used by the runtime api calls done by this instance.
+	fn set_call_context(&mut self, call_context: CallContext);
+
+	/// Register an [`Extension`] that will be accessible while executing a runtime api call.
+	fn register_extension<E: Extension>(&mut self, extension: E);
 }
 
 /// Parameters for [`CallApiAt::call_api_at`].
@@ -596,10 +610,12 @@ pub struct CallApiAtParams<'a, Block: BlockT, Backend: StateBackend<HashFor<Bloc
 	pub overlayed_changes: &'a RefCell<OverlayedChanges>,
 	/// The cache for storage transactions.
 	pub storage_transaction_cache: &'a RefCell<StorageTransactionCache<Block, Backend>>,
-	/// The context this function is executed in.
-	pub context: ExecutionContext,
+	/// The call context of this call.
+	pub call_context: CallContext,
 	/// The optional proof recorder for recording storage accesses.
 	pub recorder: &'a Option<ProofRecorder<Block>>,
+	/// The extensions that should be used for this call.
+	pub extensions: &'a RefCell<Extensions>,
 }
 
 /// Something that can call into the an api at a given block.
@@ -620,6 +636,13 @@ pub trait CallApiAt<Block: BlockT> {
 
 	/// Get the state `at` the given block.
 	fn state_at(&self, at: Block::Hash) -> Result<Self::StateBackend, ApiError>;
+
+	/// Initialize the `extensions` for the given block `at` by using the global extensions factory.
+	fn initialize_extensions(
+		&self,
+		at: Block::Hash,
+		extensions: &mut Extensions,
+	) -> Result<(), ApiError>;
 }
 
 /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime.
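The two new `ApiExt` methods replace the removed `ExecutionContext` plumbing: the call context is set explicitly and extensions are registered per instance. A minimal sketch of how block import marks a call as on-chain, mirroring the `client.rs` hunk earlier in this patch (generic names are illustrative):

```rust
use sp_api::{ApiExt, ProvideRuntimeApi};
use sp_core::traits::CallContext;
use sp_runtime::traits::Block as BlockT;

// Sketch: `C` is the client, `block` a full block to re-execute during import.
fn import_block<Block, C>(client: &C, parent_hash: Block::Hash, block: Block)
where
	Block: BlockT,
	C: ProvideRuntimeApi<Block>,
	C::Api: sp_api::Core<Block>,
{
	let mut runtime_api = client.runtime_api();
	// Previously derived from `ExecutionContext`/`BlockOrigin`; now stated explicitly.
	runtime_api.set_call_context(CallContext::Onchain);
	let _ = runtime_api.execute_block(parent_hash, block);
}
```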
diff --git a/substrate/primitives/api/test/benches/bench.rs b/substrate/primitives/api/test/benches/bench.rs
index 88ebdbc6134..45bea08af6d 100644
--- a/substrate/primitives/api/test/benches/bench.rs
+++ b/substrate/primitives/api/test/benches/bench.rs
@@ -17,7 +17,6 @@
 
 use criterion::{criterion_group, criterion_main, Criterion};
 use sp_api::ProvideRuntimeApi;
-use sp_state_machine::ExecutionStrategy;
 use substrate_test_runtime_client::{
 	runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt,
 };
@@ -56,17 +55,13 @@ fn sp_api_benchmark(c: &mut Criterion) {
 	});
 
 	c.bench_function("calling function by function pointer in wasm", |b| {
-		let client = TestClientBuilder::new()
-			.set_execution_strategy(ExecutionStrategy::AlwaysWasm)
-			.build();
+		let client = TestClientBuilder::new().build();
 		let best_hash = client.chain_info().best_hash;
 		b.iter(|| client.runtime_api().benchmark_indirect_call(best_hash).unwrap())
 	});
 
-	c.bench_function("calling function in wasm", |b| {
-		let client = TestClientBuilder::new()
-			.set_execution_strategy(ExecutionStrategy::AlwaysWasm)
-			.build();
+	c.bench_function("calling function", |b| {
+		let client = TestClientBuilder::new().build();
 		let best_hash = client.chain_info().best_hash;
 		b.iter(|| client.runtime_api().benchmark_direct_call(best_hash).unwrap())
 	});
diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs
index 344c2d31eb0..92118c16cdd 100644
--- a/substrate/primitives/api/test/tests/runtime_calls.rs
+++ b/substrate/primitives/api/test/tests/runtime_calls.rs
@@ -22,9 +22,8 @@ use sp_runtime::{
 	traits::{HashFor, Header as HeaderT},
 	TransactionOutcome,
 };
-use sp_state_machine::{
-	create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionStrategy,
-};
+use sp_state_machine::{create_proof_check_backend, execution_proof_check_on_trie_backend};
+
 use substrate_test_runtime_client::{
 	prelude::*,
 	runtime::{Block, Header, TestAPI, Transfer},
@@ -36,29 +35,18 @@ use sc_block_builder::BlockBuilderProvider;
 use sp_consensus::SelectChain;
 use substrate_test_runtime_client::sc_executor::WasmExecutor;
 
-fn calling_function_with_strat(strat: ExecutionStrategy) {
-	let client = TestClientBuilder::new().set_execution_strategy(strat).build();
+#[test]
+fn calling_runtime_function() {
+	let client = TestClientBuilder::new().build();
 	let runtime_api = client.runtime_api();
 	let best_hash = client.chain_info().best_hash;
 
 	assert_eq!(runtime_api.benchmark_add_one(best_hash, &1).unwrap(), 2);
 }
 
-#[test]
-fn calling_native_runtime_function() {
-	calling_function_with_strat(ExecutionStrategy::NativeWhenPossible);
-}
-
-#[test]
-fn calling_wasm_runtime_function() {
-	calling_function_with_strat(ExecutionStrategy::AlwaysWasm);
-}
-
 #[test]
 fn calling_native_runtime_signature_changed_function() {
-	let client = TestClientBuilder::new()
-		.set_execution_strategy(ExecutionStrategy::NativeWhenPossible)
-		.build();
+	let client = TestClientBuilder::new().build();
 	let runtime_api = client.runtime_api();
 	let best_hash = client.chain_info().best_hash;
 
@@ -67,9 +55,7 @@ fn calling_native_runtime_signature_changed_function() {
 
 #[test]
 fn use_trie_function() {
-	let client = TestClientBuilder::new()
-		.set_execution_strategy(ExecutionStrategy::AlwaysWasm)
-		.build();
+	let client = TestClientBuilder::new().build();
 	let runtime_api = client.runtime_api();
 	let best_hash = client.chain_info().best_hash;
 	assert_eq!(runtime_api.use_trie(best_hash).unwrap(), 2);
@@ -77,7 +63,7 @@ fn use_trie_function() {
 
 #[test]
 fn initialize_block_works() {
-	let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build();
+	let client = TestClientBuilder::new().build();
 	let runtime_api = client.runtime_api();
 	let best_hash = client.chain_info().best_hash;
 	runtime_api
@@ -97,9 +83,7 @@ fn initialize_block_works() {
 
 #[test]
 fn record_proof_works() {
-	let (client, longest_chain) = TestClientBuilder::new()
-		.set_execution_strategy(ExecutionStrategy::Both)
-		.build_with_longest_chain();
+	let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain();
 
 	let storage_root =
 		*futures::executor::block_on(longest_chain.best_chain()).unwrap().state_root();
@@ -151,7 +135,7 @@ fn record_proof_works() {
 
 #[test]
 fn call_runtime_api_with_multiple_arguments() {
-	let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build();
+	let client = TestClientBuilder::new().build();
 
 	let data = vec![1, 2, 4, 5, 6, 7, 8, 8, 10, 12];
 	let best_hash = client.chain_info().best_hash;
@@ -166,8 +150,7 @@ fn disable_logging_works() {
 	if std::env::var("RUN_TEST").is_ok() {
 		sp_tracing::try_init_simple();
 
-		let mut builder =
-			TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm);
+		let mut builder = TestClientBuilder::new();
 		builder.genesis_init_mut().set_wasm_code(
 			substrate_test_runtime_client::runtime::wasm_binary_logging_disabled_unwrap().to_vec(),
 		);
diff --git a/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr
index 430f63eee16..f088e8f2de5 100644
--- a/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr
+++ b/substrate/primitives/api/test/tests/ui/mock_only_self_reference.stderr
@@ -48,42 +48,3 @@ error[E0050]: method `test2` has 2 parameters but the declaration in trait `Api:
    | |_^ expected 3 parameters, found 2
    |
    = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-error[E0050]: method `test_with_context` has 3 parameters but the declaration in trait `Api::test_with_context` has 4
-  --> tests/ui/mock_only_self_reference.rs:12:1
-   |
-3  | / sp_api::decl_runtime_apis! {
-4  | |     pub trait Api {
-5  | |         fn test(data: u64);
-   | |_________________________- trait requires 4 parameters
-...
-12 | / sp_api::mock_impl_runtime_apis! {
-13 | |     impl Api<Block> for MockApi {
-14 | |         fn test(self, data: u64) {}
-15 | |
-16 | |         fn test2(&mut self, data: u64) {}
-17 | |     }
-18 | | }
-   | |_^ expected 4 parameters, found 3
-   |
-   = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-error[E0050]: method `test2_with_context` has 3 parameters but the declaration in trait `Api::test2_with_context` has 4
-  --> tests/ui/mock_only_self_reference.rs:12:1
-   |
-3  | / sp_api::decl_runtime_apis! {
-4  | |     pub trait Api {
-5  | |         fn test(data: u64);
-6  | |         fn test2(data: u64);
-   | |__________________________- trait requires 4 parameters
-...
-12 | / sp_api::mock_impl_runtime_apis! {
-13 | |     impl Api<Block> for MockApi {
-14 | |         fn test(self, data: u64) {}
-15 | |
-16 | |         fn test2(&mut self, data: u64) {}
-17 | |     }
-18 | | }
-   | |_^ expected 4 parameters, found 3
-   |
-   = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/substrate/primitives/application-crypto/test/src/ecdsa.rs b/substrate/primitives/application-crypto/test/src/ecdsa.rs
index 99ca6f4c4ad..396683a91ac 100644
--- a/substrate/primitives/application-crypto/test/src/ecdsa.rs
+++ b/substrate/primitives/application-crypto/test/src/ecdsa.rs
@@ -16,13 +16,13 @@
 // limitations under the License.
 
 //! Integration tests for ecdsa
-use sp_api::ProvideRuntimeApi;
+use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_application_crypto::ecdsa::AppPair;
 use sp_core::{
 	crypto::{ByteArray, Pair},
 	testing::ECDSA,
 };
-use sp_keystore::{testing::MemoryKeystore, Keystore};
+use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt};
 use std::sync::Arc;
 use substrate_test_runtime_client::{
 	runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt,
@@ -31,9 +31,12 @@ use substrate_test_runtime_client::{
 #[test]
 fn ecdsa_works_in_runtime() {
 	let keystore = Arc::new(MemoryKeystore::new());
-	let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build();
-	let (signature, public) = test_client
-		.runtime_api()
+	let test_client = TestClientBuilder::new().build();
+
+	let mut runtime_api = test_client.runtime_api();
+	runtime_api.register_extension(KeystoreExt::new(keystore.clone()));
+
+	let (signature, public) = runtime_api
 		.test_ecdsa_crypto(test_client.chain_info().genesis_hash)
 		.expect("Tests `ecdsa` crypto.");
 
diff --git a/substrate/primitives/application-crypto/test/src/ed25519.rs b/substrate/primitives/application-crypto/test/src/ed25519.rs
index f4553f95bf1..f0ceccdcebf 100644
--- a/substrate/primitives/application-crypto/test/src/ed25519.rs
+++ b/substrate/primitives/application-crypto/test/src/ed25519.rs
@@ -17,13 +17,13 @@
 
 //! Integration tests for ed25519
 
-use sp_api::ProvideRuntimeApi;
+use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_application_crypto::ed25519::AppPair;
 use sp_core::{
 	crypto::{ByteArray, Pair},
 	testing::ED25519,
 };
-use sp_keystore::{testing::MemoryKeystore, Keystore};
+use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt};
 use std::sync::Arc;
 use substrate_test_runtime_client::{
 	runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt,
@@ -32,9 +32,12 @@ use substrate_test_runtime_client::{
 #[test]
 fn ed25519_works_in_runtime() {
 	let keystore = Arc::new(MemoryKeystore::new());
-	let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build();
-	let (signature, public) = test_client
-		.runtime_api()
+	let test_client = TestClientBuilder::new().build();
+
+	let mut runtime_api = test_client.runtime_api();
+	runtime_api.register_extension(KeystoreExt::new(keystore.clone()));
+
+	let (signature, public) = runtime_api
 		.test_ed25519_crypto(test_client.chain_info().genesis_hash)
 		.expect("Tests `ed25519` crypto.");
 
diff --git a/substrate/primitives/application-crypto/test/src/sr25519.rs b/substrate/primitives/application-crypto/test/src/sr25519.rs
index 736521d7d9f..3c62270395f 100644
--- a/substrate/primitives/application-crypto/test/src/sr25519.rs
+++ b/substrate/primitives/application-crypto/test/src/sr25519.rs
@@ -17,13 +17,13 @@
 
 //! Integration tests for sr25519
 
-use sp_api::ProvideRuntimeApi;
+use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_application_crypto::sr25519::AppPair;
 use sp_core::{
 	crypto::{ByteArray, Pair},
 	testing::SR25519,
 };
-use sp_keystore::{testing::MemoryKeystore, Keystore};
+use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt};
 use std::sync::Arc;
 use substrate_test_runtime_client::{
 	runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt,
@@ -32,9 +32,12 @@ use substrate_test_runtime_client::{
 #[test]
 fn sr25519_works_in_runtime() {
 	let keystore = Arc::new(MemoryKeystore::new());
-	let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build();
-	let (signature, public) = test_client
-		.runtime_api()
+	let test_client = TestClientBuilder::new().build();
+
+	let mut runtime_api = test_client.runtime_api();
+	runtime_api.register_extension(KeystoreExt::new(keystore.clone()));
+
+	let (signature, public) = runtime_api
 		.test_sr25519_crypto(test_client.chain_info().genesis_hash)
 		.expect("Tests `sr25519` crypto.");
 
diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs
index 215b4448b4a..d47f3bafc42 100644
--- a/substrate/primitives/consensus/common/src/lib.rs
+++ b/substrate/primitives/consensus/common/src/lib.rs
@@ -71,16 +71,6 @@ pub enum BlockOrigin {
 	File,
 }
 
-impl From<BlockOrigin> for sp_core::ExecutionContext {
-	fn from(origin: BlockOrigin) -> Self {
-		if origin == BlockOrigin::NetworkInitialSync {
-			sp_core::ExecutionContext::Syncing
-		} else {
-			sp_core::ExecutionContext::Importing
-		}
-	}
-}
-
 /// Environment for a Consensus instance.
 ///
 /// Creates proposer instance.
diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml
index 65c252a5231..f0da73e8ec7 100644
--- a/substrate/primitives/core/Cargo.toml
+++ b/substrate/primitives/core/Cargo.toml
@@ -39,6 +39,7 @@ sp-externalities = { version = "0.19.0", optional = true, path = "../externaliti
 futures = { version = "0.3.21", optional = true }
 dyn-clonable = { version = "0.9.0", optional = true }
 thiserror = { version = "1.0.30", optional = true }
+tracing = { version = "0.1.29", optional = true }
 bitflags = "1.3"
 paste = "1.0.7"
 
@@ -113,6 +114,7 @@ std = [
 	"futures/thread-pool",
 	"libsecp256k1/std",
 	"dyn-clonable",
+	"tracing",
 ]
 
 # Serde support without relying on std features.
diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs
index 951b481253b..da3b4381909 100644
--- a/substrate/primitives/core/src/lib.rs
+++ b/substrate/primitives/core/src/lib.rs
@@ -98,45 +98,6 @@ pub use sp_storage as storage;
 #[doc(hidden)]
 pub use sp_std;
 
-/// Context for executing a call into the runtime.
-pub enum ExecutionContext {
-	/// Context used for general block import (including locally authored blocks).
-	Importing,
-	/// Context used for importing blocks as part of an initial sync of the blockchain.
-	///
-	/// We distinguish between major sync and import so that validators who are running
-	/// their initial sync (or catching up after some time offline) can use the faster
-	/// native runtime (since we can reasonably assume the network as a whole has already
-	/// come to a broad consensus on the block and it probably hasn't been crafted
-	/// specifically to attack this node), but when importing blocks at the head of the
-	/// chain in normal operation they can use the safer Wasm version.
-	Syncing,
-	/// Context used for block construction.
-	BlockConstruction,
-	/// Context used for offchain calls.
-	///
-	/// This allows passing offchain extension and customizing available capabilities.
-	OffchainCall(Option<(Box<dyn offchain::Externalities>, offchain::Capabilities)>),
-}
-
-impl ExecutionContext {
-	/// Returns the capabilities of particular context.
-	pub fn capabilities(&self) -> offchain::Capabilities {
-		use ExecutionContext::*;
-
-		match self {
-			Importing | Syncing | BlockConstruction => offchain::Capabilities::empty(),
-			// Enable keystore, transaction pool and Offchain DB reads by default for offchain
-			// calls.
-			OffchainCall(None) =>
-				offchain::Capabilities::KEYSTORE |
-					offchain::Capabilities::OFFCHAIN_DB_READ |
-					offchain::Capabilities::TRANSACTION_POOL,
-			OffchainCall(Some((_, capabilities))) => *capabilities,
-		}
-	}
-}
-
 /// Hex-serialized shim for `Vec<u8>`.
 #[derive(PartialEq, Eq, Clone, RuntimeDebug)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))]
diff --git a/substrate/primitives/core/src/offchain/mod.rs b/substrate/primitives/core/src/offchain/mod.rs
index a6cef85e6ac..cef495dfaac 100644
--- a/substrate/primitives/core/src/offchain/mod.rs
+++ b/substrate/primitives/core/src/offchain/mod.rs
@@ -260,26 +260,22 @@ impl Timestamp {
 bitflags::bitflags! {
 	/// Execution context extra capabilities.
 	pub struct Capabilities: u32 {
-		/// Access to transaction pool.
-		const TRANSACTION_POOL = 0b0000_0000_0001;
 		/// External http calls.
-		const HTTP = 0b0000_0000_0010;
+		const HTTP = 1 << 0;
 		/// Keystore access.
-		const KEYSTORE = 0b0000_0000_0100;
+		const KEYSTORE = 1 << 2;
 		/// Randomness source.
-		const RANDOMNESS = 0b0000_0000_1000;
+		const RANDOMNESS = 1 << 3;
 		/// Access to opaque network state.
-		const NETWORK_STATE = 0b0000_0001_0000;
+		const NETWORK_STATE = 1 << 4;
 		/// Access to offchain worker DB (read only).
-		const OFFCHAIN_DB_READ = 0b0000_0010_0000;
+		const OFFCHAIN_DB_READ = 1 << 5;
 		/// Access to offchain worker DB (writes).
-		const OFFCHAIN_DB_WRITE = 0b0000_0100_0000;
+		const OFFCHAIN_DB_WRITE = 1 << 6;
 		/// Manage the authorized nodes
-		const NODE_AUTHORIZATION = 0b0000_1000_0000;
+		const NODE_AUTHORIZATION = 1 << 7;
 		/// Access time related functionality
-		const TIME = 0b0001_0000_0000;
-		/// Access the statement store.
-		const STATEMENT_STORE = 0b0010_0000_0000;
+		const TIME = 1 << 8;
 	}
 }
 
@@ -785,8 +781,8 @@ mod tests {
 		assert!(!none.contains(Capabilities::KEYSTORE));
 		assert!(all.contains(Capabilities::KEYSTORE));
 		assert!(some.contains(Capabilities::KEYSTORE));
-		assert!(!none.contains(Capabilities::TRANSACTION_POOL));
-		assert!(all.contains(Capabilities::TRANSACTION_POOL));
-		assert!(!some.contains(Capabilities::TRANSACTION_POOL));
+		assert!(!none.contains(Capabilities::RANDOMNESS));
+		assert!(all.contains(Capabilities::RANDOMNESS));
+		assert!(!some.contains(Capabilities::TIME));
 	}
 }
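
With `TRANSACTION_POOL` and `STATEMENT_STORE` gone, `Capabilities` remains an ordinary `bitflags` set, so call sites combine and test flags directly. A minimal sketch of that usage (not part of the patch):

```rust
use sp_core::offchain::Capabilities;

fn main() {
	// Combine individual capabilities into a set...
	let caps = Capabilities::KEYSTORE | Capabilities::OFFCHAIN_DB_READ;

	// ...and query the set.
	assert!(caps.contains(Capabilities::KEYSTORE));
	assert!(!caps.contains(Capabilities::HTTP));

	// `all()` covers every remaining flag, including the re-numbered `TIME`.
	assert!(Capabilities::all().contains(Capabilities::TIME));
}
```
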
diff --git a/substrate/primitives/core/src/offchain/storage.rs b/substrate/primitives/core/src/offchain/storage.rs
index 3a114de5bfa..4db839f1a45 100644
--- a/substrate/primitives/core/src/offchain/storage.rs
+++ b/substrate/primitives/core/src/offchain/storage.rs
@@ -17,12 +17,14 @@
 
 //! In-memory implementation of offchain workers database.
 
-use crate::offchain::OffchainStorage;
+use crate::offchain::{DbExternalities, OffchainStorage, StorageKind, STORAGE_PREFIX};
 use std::{
 	collections::hash_map::{Entry, HashMap},
 	iter::Iterator,
 };
 
+const LOG_TARGET: &str = "offchain-worker::storage";
+
 /// In-memory storage for offchain workers.
 #[derive(Debug, Clone, Default)]
 pub struct InMemOffchainStorage {
@@ -88,3 +90,95 @@ impl OffchainStorage for InMemOffchainStorage {
 		}
 	}
 }
+
+fn unavailable_yet<R: Default>(name: &str) -> R {
+	tracing::error!(
+		target: LOG_TARGET,
+		"The {:?} API is not available for offchain workers yet. Follow \
+		https://github.com/paritytech/substrate/issues/1458 for details",
+		name
+	);
+	Default::default()
+}
+
+const LOCAL_DB: &str = "LOCAL (fork-aware) DB";
+
+/// Offchain DB that implements [`DbExternalities`] for [`OffchainStorage`].
+#[derive(Debug, Clone)]
+pub struct OffchainDb<Storage> {
+	/// Persistent storage database.
+	persistent: Storage,
+}
+
+impl<Storage> OffchainDb<Storage> {
+	/// Create new instance of Offchain DB.
+	pub fn new(persistent: Storage) -> Self {
+		Self { persistent }
+	}
+}
+
+impl<Storage: OffchainStorage> DbExternalities for OffchainDb<Storage> {
+	fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) {
+		tracing::debug!(
+			target: LOG_TARGET,
+			?kind,
+			key = ?array_bytes::bytes2hex("", key),
+			value = ?array_bytes::bytes2hex("", value),
+			"Write",
+		);
+		match kind {
+			StorageKind::PERSISTENT => self.persistent.set(STORAGE_PREFIX, key, value),
+			StorageKind::LOCAL => unavailable_yet(LOCAL_DB),
+		}
+	}
+
+	fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) {
+		tracing::debug!(
+			target: LOG_TARGET,
+			?kind,
+			key = ?array_bytes::bytes2hex("", key),
+			"Clear",
+		);
+		match kind {
+			StorageKind::PERSISTENT => self.persistent.remove(STORAGE_PREFIX, key),
+			StorageKind::LOCAL => unavailable_yet(LOCAL_DB),
+		}
+	}
+
+	fn local_storage_compare_and_set(
+		&mut self,
+		kind: StorageKind,
+		key: &[u8],
+		old_value: Option<&[u8]>,
+		new_value: &[u8],
+	) -> bool {
+		tracing::debug!(
+			target: LOG_TARGET,
+			?kind,
+			key = ?array_bytes::bytes2hex("", key),
+			new_value = ?array_bytes::bytes2hex("", new_value),
+			old_value = ?old_value.as_ref().map(|s| array_bytes::bytes2hex("", s)),
+			"CAS",
+		);
+		match kind {
+			StorageKind::PERSISTENT =>
+				self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value),
+			StorageKind::LOCAL => unavailable_yet(LOCAL_DB),
+		}
+	}
+
+	fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option<Vec<u8>> {
+		let result = match kind {
+			StorageKind::PERSISTENT => self.persistent.get(STORAGE_PREFIX, key),
+			StorageKind::LOCAL => unavailable_yet(LOCAL_DB),
+		};
+		tracing::debug!(
+			target: LOG_TARGET,
+			?kind,
+			key = ?array_bytes::bytes2hex("", key),
+			result = ?result.as_ref().map(|s| array_bytes::bytes2hex("", s)),
+			"Read",
+		);
+		result
+	}
+}
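
`OffchainDb` now lives next to `InMemOffchainStorage` and implements `DbExternalities` for any `OffchainStorage`, so it can be exercised directly against the in-memory backend. A minimal sketch (not part of the patch):

```rust
use sp_core::offchain::{
	storage::{InMemOffchainStorage, OffchainDb},
	DbExternalities, StorageKind,
};

fn main() {
	let mut db = OffchainDb::new(InMemOffchainStorage::default());

	// Persistent writes go straight to the wrapped `OffchainStorage`.
	db.local_storage_set(StorageKind::PERSISTENT, b"key", b"value");
	assert_eq!(
		db.local_storage_get(StorageKind::PERSISTENT, b"key"),
		Some(b"value".to_vec())
	);

	// The fork-aware LOCAL kind is still unimplemented: it logs an error and
	// returns the default value.
	assert_eq!(db.local_storage_get(StorageKind::LOCAL, b"key"), None);
}
```
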
diff --git a/substrate/primitives/externalities/src/extensions.rs b/substrate/primitives/externalities/src/extensions.rs
index 84155227a71..8b0bbd2c592 100644
--- a/substrate/primitives/externalities/src/extensions.rs
+++ b/substrate/primitives/externalities/src/extensions.rs
@@ -42,6 +42,12 @@ pub trait Extension: Send + Any {
 	fn as_mut_any(&mut self) -> &mut dyn Any;
 }
 
+impl Extension for Box<dyn Extension> {
+	fn as_mut_any(&mut self) -> &mut dyn Any {
+		(**self).as_mut_any()
+	}
+}
+
 /// Macro for declaring an extension that is usable with [`Extensions`].
 ///
 /// The extension will be a unit wrapper struct that implements [`Extension`], `Deref` and
@@ -190,6 +196,14 @@ impl Extensions {
 	pub fn iter_mut(&mut self) -> impl Iterator<Item = (&TypeId, &mut Box<dyn Extension>)> {
 		self.extensions.iter_mut()
 	}
+
+	/// Merge `other` into `self`.
+	///
+	/// If both contain the same extension, the extension instance of `other` will overwrite the
+	/// instance found in `self`.
+	pub fn merge(&mut self, other: Self) {
+		self.extensions.extend(other.extensions);
+	}
 }
 
 impl Extend<Extensions> for Extensions {
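
`Extensions` keys its entries by `TypeId`, so `merge` behaves like the existing `Extend` impl: an instance registered in `other` replaces one of the same type in `self`. A toy sketch with a hypothetical `CounterExt` extension (not part of the patch):

```rust
use sp_externalities::{decl_extension, Extensions};

decl_extension! {
	/// A toy extension used only for this sketch.
	struct CounterExt(u32);
}

fn main() {
	let mut base = Extensions::new();
	base.register(CounterExt(1));

	let mut overrides = Extensions::new();
	overrides.register(CounterExt(2));

	// The `CounterExt` from `overrides` replaces the one already in `base`.
	base.merge(overrides);
}
```
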
diff --git a/substrate/primitives/keystore/src/lib.rs b/substrate/primitives/keystore/src/lib.rs
index 1d2a27cb872..07583d11d52 100644
--- a/substrate/primitives/keystore/src/lib.rs
+++ b/substrate/primitives/keystore/src/lib.rs
@@ -174,37 +174,36 @@ pub trait Keystore: Send + Sync {
 		msg: &[u8; 32],
 	) -> Result<Option<ecdsa::Signature>, Error>;
 
-	#[cfg(feature = "bls-experimental")]
 	/// Returns all bls12-381 public keys for the given key type.
+	#[cfg(feature = "bls-experimental")]
 	fn bls381_public_keys(&self, id: KeyTypeId) -> Vec<bls381::Public>;
 
-	#[cfg(feature = "bls-experimental")]
 	/// Returns all bls12-377 public keys for the given key type.
+	#[cfg(feature = "bls-experimental")]
 	fn bls377_public_keys(&self, id: KeyTypeId) -> Vec<bls377::Public>;
 
-	#[cfg(feature = "bls-experimental")]
 	/// Generate a new bls381 key pair for the given key type and an optional seed.
 	///
 	/// Returns a `bls381::Public` key of the generated key pair or an `Err` if
 	/// something failed during key generation.
+	#[cfg(feature = "bls-experimental")]
 	fn bls381_generate_new(
 		&self,
 		key_type: KeyTypeId,
 		seed: Option<&str>,
 	) -> Result<bls381::Public, Error>;
 
-	#[cfg(feature = "bls-experimental")]
 	/// Generate a new bls377 key pair for the given key type and an optional seed.
 	///
 	/// Returns a `bls377::Public` key of the generated key pair or an `Err` if
 	/// something failed during key generation.
+	#[cfg(feature = "bls-experimental")]
 	fn bls377_generate_new(
 		&self,
 		key_type: KeyTypeId,
 		seed: Option<&str>,
 	) -> Result<bls377::Public, Error>;
 
-	#[cfg(feature = "bls-experimental")]
 	/// Generate a bls381 signature for a given message.
 	///
 	/// Receives [`KeyTypeId`] and a [`bls381::Public`] key to be able to map
@@ -213,6 +212,7 @@ pub trait Keystore: Send + Sync {
 	/// Returns a [`bls381::Signature`] or `None` in case the given `key_type`
 	/// and `public` combination doesn't exist in the keystore.
 	/// An `Err` will be returned if generating the signature itself failed.
+	#[cfg(feature = "bls-experimental")]
 	fn bls381_sign(
 		&self,
 		key_type: KeyTypeId,
@@ -220,7 +220,6 @@ pub trait Keystore: Send + Sync {
 		msg: &[u8],
 	) -> Result<Option<bls381::Signature>, Error>;
 
-	#[cfg(feature = "bls-experimental")]
 	/// Generate a bls377 signature for a given message.
 	///
 	/// Receives [`KeyTypeId`] and a [`bls377::Public`] key to be able to map
@@ -229,6 +228,7 @@ pub trait Keystore: Send + Sync {
 	/// Returns a [`bls377::Signature`] or `None` in case the given `key_type`
 	/// and `public` combination doesn't exist in the keystore.
 	/// An `Err` will be returned if generating the signature itself failed.
+	#[cfg(feature = "bls-experimental")]
 	fn bls377_sign(
 		&self,
 		key_type: KeyTypeId,
@@ -309,6 +309,158 @@ pub trait Keystore: Send + Sync {
 	}
 }
 
+impl<T: Keystore + ?Sized> Keystore for Arc<T> {
+	fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec<sr25519::Public> {
+		(**self).sr25519_public_keys(key_type)
+	}
+
+	fn sr25519_generate_new(
+		&self,
+		key_type: KeyTypeId,
+		seed: Option<&str>,
+	) -> Result<sr25519::Public, Error> {
+		(**self).sr25519_generate_new(key_type, seed)
+	}
+
+	fn sr25519_sign(
+		&self,
+		key_type: KeyTypeId,
+		public: &sr25519::Public,
+		msg: &[u8],
+	) -> Result<Option<sr25519::Signature>, Error> {
+		(**self).sr25519_sign(key_type, public, msg)
+	}
+
+	fn sr25519_vrf_sign(
+		&self,
+		key_type: KeyTypeId,
+		public: &sr25519::Public,
+		data: &sr25519::vrf::VrfSignData,
+	) -> Result<Option<sr25519::vrf::VrfSignature>, Error> {
+		(**self).sr25519_vrf_sign(key_type, public, data)
+	}
+
+	fn sr25519_vrf_output(
+		&self,
+		key_type: KeyTypeId,
+		public: &sr25519::Public,
+		input: &sr25519::vrf::VrfInput,
+	) -> Result<Option<sr25519::vrf::VrfOutput>, Error> {
+		(**self).sr25519_vrf_output(key_type, public, input)
+	}
+
+	fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec<ed25519::Public> {
+		(**self).ed25519_public_keys(key_type)
+	}
+
+	fn ed25519_generate_new(
+		&self,
+		key_type: KeyTypeId,
+		seed: Option<&str>,
+	) -> Result<ed25519::Public, Error> {
+		(**self).ed25519_generate_new(key_type, seed)
+	}
+
+	fn ed25519_sign(
+		&self,
+		key_type: KeyTypeId,
+		public: &ed25519::Public,
+		msg: &[u8],
+	) -> Result<Option<ed25519::Signature>, Error> {
+		(**self).ed25519_sign(key_type, public, msg)
+	}
+
+	fn ecdsa_public_keys(&self, key_type: KeyTypeId) -> Vec<ecdsa::Public> {
+		(**self).ecdsa_public_keys(key_type)
+	}
+
+	fn ecdsa_generate_new(
+		&self,
+		key_type: KeyTypeId,
+		seed: Option<&str>,
+	) -> Result<ecdsa::Public, Error> {
+		(**self).ecdsa_generate_new(key_type, seed)
+	}
+
+	fn ecdsa_sign(
+		&self,
+		key_type: KeyTypeId,
+		public: &ecdsa::Public,
+		msg: &[u8],
+	) -> Result<Option<ecdsa::Signature>, Error> {
+		(**self).ecdsa_sign(key_type, public, msg)
+	}
+
+	fn ecdsa_sign_prehashed(
+		&self,
+		key_type: KeyTypeId,
+		public: &ecdsa::Public,
+		msg: &[u8; 32],
+	) -> Result<Option<ecdsa::Signature>, Error> {
+		(**self).ecdsa_sign_prehashed(key_type, public, msg)
+	}
+
+	fn insert(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> {
+		(**self).insert(key_type, suri, public)
+	}
+
+	fn keys(&self, key_type: KeyTypeId) -> Result<Vec<Vec<u8>>, Error> {
+		(**self).keys(key_type)
+	}
+
+	fn has_keys(&self, public_keys: &[(Vec<u8>, KeyTypeId)]) -> bool {
+		(**self).has_keys(public_keys)
+	}
+
+	#[cfg(feature = "bls-experimental")]
+	fn bls381_public_keys(&self, id: KeyTypeId) -> Vec<bls381::Public> {
+		(**self).bls381_public_keys(id)
+	}
+
+	#[cfg(feature = "bls-experimental")]
+	fn bls377_public_keys(&self, id: KeyTypeId) -> Vec<bls377::Public> {
+		(**self).bls377_public_keys(id)
+	}
+
+	#[cfg(feature = "bls-experimental")]
+	fn bls381_generate_new(
+		&self,
+		key_type: KeyTypeId,
+		seed: Option<&str>,
+	) -> Result<bls381::Public, Error> {
+		(**self).bls381_generate_new(key_type, seed)
+	}
+
+	#[cfg(feature = "bls-experimental")]
+	fn bls377_generate_new(
+		&self,
+		key_type: KeyTypeId,
+		seed: Option<&str>,
+	) -> Result<bls377::Public, Error> {
+		(**self).bls377_generate_new(key_type, seed)
+	}
+
+	#[cfg(feature = "bls-experimental")]
+	fn bls381_sign(
+		&self,
+		key_type: KeyTypeId,
+		public: &bls381::Public,
+		msg: &[u8],
+	) -> Result<Option<bls381::Signature>, Error> {
+		(**self).bls381_sign(key_type, public, msg)
+	}
+
+	#[cfg(feature = "bls-experimental")]
+	fn bls377_sign(
+		&self,
+		key_type: KeyTypeId,
+		public: &bls377::Public,
+		msg: &[u8],
+	) -> Result<Option<bls377::Signature>, Error> {
+		(**self).bls377_sign(key_type, public, msg)
+	}
+}
+
 /// A shared pointer to a keystore implementation.
 pub type KeystorePtr = Arc<dyn Keystore>;
 
@@ -319,6 +471,13 @@ sp_externalities::decl_extension! {
 
 impl KeystoreExt {
 	/// Create a new instance of `KeystoreExt`
+	///
+	/// This is more performant as we don't need to wrap the keystore in another [`Arc`].
+	pub fn from(keystore: KeystorePtr) -> Self {
+		Self(keystore)
+	}
+
+	/// Create a new instance of `KeystoreExt` using the given `keystore`.
 	pub fn new<T: Keystore + 'static>(keystore: T) -> Self {
 		Self(Arc::new(keystore))
 	}
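
Implementing `Keystore` for `Arc<T>` is what makes the new `KeystoreExt::from` useful: an existing `KeystorePtr` can become an extension without adding a second `Arc` layer. A small sketch using the in-memory test keystore (assuming the `std`/testing API of `sp-keystore`; not part of the patch):

```rust
use std::sync::Arc;

use sp_keystore::{testing::MemoryKeystore, KeystoreExt, KeystorePtr};

fn main() {
	let keystore: KeystorePtr = Arc::new(MemoryKeystore::new());

	// Reuses the existing `Arc<dyn Keystore>` as-is.
	let _ext = KeystoreExt::from(keystore.clone());

	// `new` still takes an owned keystore and wraps it in a fresh `Arc`.
	let _owned_ext = KeystoreExt::new(MemoryKeystore::new());
}
```
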
diff --git a/substrate/primitives/runtime/src/runtime_logger.rs b/substrate/primitives/runtime/src/runtime_logger.rs
index 63e96a52a52..b7374b8b6f6 100644
--- a/substrate/primitives/runtime/src/runtime_logger.rs
+++ b/substrate/primitives/runtime/src/runtime_logger.rs
@@ -68,8 +68,7 @@ mod tests {
 	use sp_api::ProvideRuntimeApi;
 	use std::{env, str::FromStr};
 	use substrate_test_runtime_client::{
-		runtime::TestAPI, DefaultTestClientBuilderExt, ExecutionStrategy, TestClientBuilder,
-		TestClientBuilderExt,
+		runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt,
 	};
 
 	#[test]
@@ -78,9 +77,7 @@ mod tests {
 			sp_tracing::try_init_simple();
 			log::set_max_level(log::LevelFilter::from_str(&env::var("RUST_LOG").unwrap()).unwrap());
 
-			let client = TestClientBuilder::new()
-				.set_execution_strategy(ExecutionStrategy::AlwaysWasm)
-				.build();
+			let client = TestClientBuilder::new().build();
 			let runtime_api = client.runtime_api();
 			runtime_api
 				.do_trace_log(client.chain_info().genesis_hash)
diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml
index f43390f0a10..dc99c356cc1 100644
--- a/substrate/primitives/session/Cargo.toml
+++ b/substrate/primitives/session/Cargo.toml
@@ -20,6 +20,7 @@ sp-core = { version = "21.0.0", default-features = false, path = "../core" }
 sp-runtime = { version = "24.0.0", optional = true, path = "../runtime" }
 sp-staking = { version = "4.0.0-dev", default-features = false, path = "../staking" }
 sp-std = { version = "8.0.0", default-features = false, path = "../std" }
+sp-keystore = { version = "0.27.0", path = "../keystore", optional = true }
 
 [features]
 default = [ "std" ]
@@ -31,4 +32,5 @@ std = [
 	"sp-runtime/std",
 	"sp-staking/std",
 	"sp-std/std",
+	"sp-keystore",
 ]
diff --git a/substrate/primitives/session/src/lib.rs b/substrate/primitives/session/src/lib.rs
index 642aa2a2114..45395e9766f 100644
--- a/substrate/primitives/session/src/lib.rs
+++ b/substrate/primitives/session/src/lib.rs
@@ -112,17 +112,22 @@ pub fn generate_initial_session_keys<Block, T>(
 	client: std::sync::Arc<T>,
 	at: Block::Hash,
 	seeds: Vec<String>,
+	keystore: sp_keystore::KeystorePtr,
 ) -> Result<(), sp_api::ApiError>
 where
 	Block: BlockT,
 	T: ProvideRuntimeApi<Block>,
 	T::Api: SessionKeys<Block>,
 {
+	use sp_api::ApiExt;
+
 	if seeds.is_empty() {
 		return Ok(())
 	}
 
-	let runtime_api = client.runtime_api();
+	let mut runtime_api = client.runtime_api();
+
+	runtime_api.register_extension(sp_keystore::KeystoreExt::from(keystore));
 
 	for seed in seeds {
 		runtime_api.generate_session_keys(at, Some(seed.as_bytes().to_vec()))?;
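
Call sites now pass the keystore explicitly; the function registers it as a `KeystoreExt` on the runtime API instance before calling into the runtime. A hedged caller sketch, with the helper name and generic bounds chosen here only for illustration (not part of the patch):

```rust
use std::sync::Arc;

use sp_api::ProvideRuntimeApi;
use sp_keystore::KeystorePtr;
use sp_runtime::traits::Block as BlockT;
use sp_session::{generate_initial_session_keys, SessionKeys};

/// Hypothetical helper showing the new signature: the keystore is an explicit argument.
fn init_session_keys<Block, Client>(
	client: Arc<Client>,
	best_hash: Block::Hash,
	seeds: Vec<String>,
	keystore: KeystorePtr,
) -> Result<(), sp_api::ApiError>
where
	Block: BlockT,
	Client: ProvideRuntimeApi<Block>,
	Client::Api: SessionKeys<Block>,
{
	generate_initial_session_keys(client, best_hash, seeds, keystore)
}
```
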
diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs
index cc7de9080e3..3ef923851ff 100644
--- a/substrate/primitives/state-machine/src/lib.rs
+++ b/substrate/primitives/state-machine/src/lib.rs
@@ -168,14 +168,7 @@ mod execution {
 		traits::{CallContext, CodeExecutor, RuntimeCode},
 	};
 	use sp_externalities::Extensions;
-	use std::{
-		collections::{HashMap, HashSet},
-		fmt,
-	};
-
-	const PROOF_CLOSE_TRANSACTION: &str = "\
-		Closing a transaction that was started in this function. Client initiated transactions
-		are protected from being closed by the runtime. qed";
+	use std::collections::{HashMap, HashSet};
 
 	pub(crate) type CallResult<E> = Result<Vec<u8>, E>;
 
@@ -185,21 +178,6 @@ mod execution {
 	/// Trie backend with in-memory storage.
 	pub type InMemoryBackend<H> = TrieBackend<MemoryDB<H>, H>;
 
-	/// Strategy for executing a call into the runtime.
-	#[derive(Copy, Clone, Eq, PartialEq, Debug)]
-	pub enum ExecutionStrategy {
-		/// Execute with the native equivalent if it is compatible with the given wasm module;
-		/// otherwise fall back to the wasm.
-		NativeWhenPossible,
-		/// Use the given wasm module.
-		AlwaysWasm,
-		/// Run with both the wasm and the native variant (if compatible). Report any discrepancy
-		/// as an error.
-		Both,
-		/// First native, then if that fails or is not possible, wasm.
-		NativeElseWasm,
-	}
-
 	/// Storage backend trust level.
 	#[derive(Debug, Clone)]
 	pub enum BackendTrustLevel {
@@ -211,73 +189,6 @@ mod execution {
 		Untrusted,
 	}
 
-	/// Like `ExecutionStrategy` only it also stores a handler in case of consensus failure.
-	#[derive(Clone)]
-	pub enum ExecutionManager<F> {
-		/// Execute with the native equivalent if it is compatible with the given wasm module;
-		/// otherwise fall back to the wasm.
-		NativeWhenPossible,
-		/// Use the given wasm module. The backend on which code is executed code could be
-		/// trusted to provide all storage or not (i.e. the light client cannot be trusted to
-		/// provide for all storage queries since the storage entries it has come from an external
-		/// node).
-		AlwaysWasm(BackendTrustLevel),
-		/// Run with both the wasm and the native variant (if compatible). Call `F` in the case of
-		/// any discrepancy.
-		Both(F),
-		/// First native, then if that fails or is not possible, wasm.
-		NativeElseWasm,
-	}
-
-	impl<'a, F> From<&'a ExecutionManager<F>> for ExecutionStrategy {
-		fn from(s: &'a ExecutionManager<F>) -> Self {
-			match *s {
-				ExecutionManager::NativeWhenPossible => ExecutionStrategy::NativeWhenPossible,
-				ExecutionManager::AlwaysWasm(_) => ExecutionStrategy::AlwaysWasm,
-				ExecutionManager::NativeElseWasm => ExecutionStrategy::NativeElseWasm,
-				ExecutionManager::Both(_) => ExecutionStrategy::Both,
-			}
-		}
-	}
-
-	impl ExecutionStrategy {
-		/// Gets the corresponding manager for the execution strategy.
-		pub fn get_manager<E: fmt::Debug>(self) -> ExecutionManager<DefaultHandler<E>> {
-			match self {
-				ExecutionStrategy::AlwaysWasm =>
-					ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted),
-				ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible,
-				ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm,
-				ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| {
-					warn!(
-						"Consensus error between wasm {:?} and native {:?}. Using wasm.",
-						wasm_result, native_result,
-					);
-					warn!("   Native result {:?}", native_result);
-					warn!("   Wasm result {:?}", wasm_result);
-					wasm_result
-				}),
-			}
-		}
-	}
-
-	/// Evaluate to ExecutionManager::NativeElseWasm, without having to figure out the type.
-	pub fn native_else_wasm<E>() -> ExecutionManager<DefaultHandler<E>> {
-		ExecutionManager::NativeElseWasm
-	}
-
-	/// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out
-	/// the type.
-	fn always_wasm<E>() -> ExecutionManager<DefaultHandler<E>> {
-		ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted)
-	}
-
-	/// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out
-	/// the type.
-	fn always_untrusted_wasm<E>() -> ExecutionManager<DefaultHandler<E>> {
-		ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted)
-	}
-
 	/// The substrate state machine.
 	pub struct StateMachine<'a, B, H, Exec>
 	where
@@ -289,7 +200,7 @@ mod execution {
 		method: &'a str,
 		call_data: &'a [u8],
 		overlay: &'a mut OverlayedChanges,
-		extensions: Extensions,
+		extensions: &'a mut Extensions,
 		storage_transaction_cache: Option<&'a mut StorageTransactionCache<B::Transaction, H>>,
 		runtime_code: &'a RuntimeCode<'a>,
 		stats: StateMachineStats,
@@ -324,7 +235,7 @@ mod execution {
 			exec: &'a Exec,
 			method: &'a str,
 			call_data: &'a [u8],
-			extensions: Extensions,
+			extensions: &'a mut Extensions,
 			runtime_code: &'a RuntimeCode,
 			context: CallContext,
 		) -> Self {
@@ -372,13 +283,7 @@ mod execution {
 		/// blocks (e.g. a transaction at a time), ensure a different method is used.
 		///
 		/// Returns the SCALE encoded result of the executed function.
-		pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result<Vec<u8>, Box<dyn Error>> {
-			// We are not giving a native call and thus we are sure that the result can never be a
-			// native value.
-			self.execute_using_consensus_failure_handler(strategy.get_manager())
-		}
-
-		fn execute_aux(&mut self, use_native: bool) -> (CallResult<Exec::Error>, bool) {
+		pub fn execute(&mut self) -> Result<Vec<u8>, Box<dyn Error>> {
 			let mut cache = StorageTransactionCache::default();
 
 			let cache = match self.storage_transaction_cache.as_mut() {
@@ -390,7 +295,7 @@ mod execution {
 				.enter_runtime()
 				.expect("StateMachine is never called from the runtime; qed");
 
-			let mut ext = Ext::new(self.overlay, cache, self.backend, Some(&mut self.extensions));
+			let mut ext = Ext::new(self.overlay, cache, self.backend, Some(self.extensions));
 
 			let ext_id = ext.id;
 
@@ -403,14 +308,10 @@ mod execution {
 				"Call",
 			);
 
-			let (result, was_native) = self.exec.call(
-				&mut ext,
-				self.runtime_code,
-				self.method,
-				self.call_data,
-				use_native,
-				self.context,
-			);
+			let result = self
+				.exec
+				.call(&mut ext, self.runtime_code, self.method, self.call_data, false, self.context)
+				.0;
 
 			self.overlay
 				.exit_runtime()
@@ -419,92 +320,11 @@ mod execution {
 			trace!(
 				target: "state",
 				ext_id = %HexDisplay::from(&ext_id.to_le_bytes()),
-				?was_native,
 				?result,
 				"Return",
 			);
 
-			(result, was_native)
-		}
-
-		fn execute_call_with_both_strategy<Handler>(
-			&mut self,
-			on_consensus_failure: Handler,
-		) -> CallResult<Exec::Error>
-		where
-			Handler:
-				FnOnce(CallResult<Exec::Error>, CallResult<Exec::Error>) -> CallResult<Exec::Error>,
-		{
-			self.overlay.start_transaction();
-			let (result, was_native) = self.execute_aux(true);
-
-			if was_native {
-				self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION);
-				let (wasm_result, _) = self.execute_aux(false);
-
-				if (result.is_ok() &&
-					wasm_result.is_ok() && result.as_ref().ok() == wasm_result.as_ref().ok()) ||
-					result.is_err() && wasm_result.is_err()
-				{
-					result
-				} else {
-					on_consensus_failure(wasm_result, result)
-				}
-			} else {
-				self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION);
-				result
-			}
-		}
-
-		fn execute_call_with_native_else_wasm_strategy(&mut self) -> CallResult<Exec::Error> {
-			self.overlay.start_transaction();
-			let (result, was_native) = self.execute_aux(true);
-
-			if !was_native || result.is_ok() {
-				self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION);
-				result
-			} else {
-				self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION);
-				self.execute_aux(false).0
-			}
-		}
-
-		/// Execute a call using the given state backend, overlayed changes, and call executor.
-		///
-		/// On an error, no prospective changes are written to the overlay.
-		///
-		/// Note: changes to code will be in place if this call is made again. For running partial
-		/// blocks (e.g. a transaction at a time), ensure a different method is used.
-		///
-		/// Returns the result of the executed function either in native representation `R` or
-		/// in SCALE encoded representation.
-		pub fn execute_using_consensus_failure_handler<Handler>(
-			&mut self,
-			manager: ExecutionManager<Handler>,
-		) -> Result<Vec<u8>, Box<dyn Error>>
-		where
-			Handler:
-				FnOnce(CallResult<Exec::Error>, CallResult<Exec::Error>) -> CallResult<Exec::Error>,
-		{
-			let result = {
-				match manager {
-					ExecutionManager::Both(on_consensus_failure) =>
-						self.execute_call_with_both_strategy(on_consensus_failure),
-					ExecutionManager::NativeElseWasm =>
-						self.execute_call_with_native_else_wasm_strategy(),
-					ExecutionManager::AlwaysWasm(trust_level) => {
-						let _abort_guard = match trust_level {
-							BackendTrustLevel::Trusted => None,
-							BackendTrustLevel::Untrusted =>
-								Some(sp_panic_handler::AbortGuard::never_abort()),
-						};
-						self.execute_aux(false).0
-					},
-					ExecutionManager::NativeWhenPossible => self.execute_aux(true).0,
-				}
-			};
-
-			result.map_err(|e| Box::new(e) as _)
+			result.map_err(|e| Box::new(e) as Box<_>)
 		}
 	}
 
@@ -531,7 +351,7 @@ mod execution {
 			method,
 			call_data,
 			runtime_code,
-			Default::default(),
+			&mut Default::default(),
 		)
 	}
 
@@ -551,7 +371,7 @@ mod execution {
 		method: &str,
 		call_data: &[u8],
 		runtime_code: &RuntimeCode,
-		extensions: Extensions,
+		extensions: &mut Extensions,
 	) -> Result<(Vec<u8>, StorageProof), Box<dyn Error>>
 	where
 		S: trie_backend_essence::TrieBackendStorage<H>,
@@ -572,7 +392,7 @@ mod execution {
 			runtime_code,
 			CallContext::Offchain,
 		)
-		.execute_using_consensus_failure_handler::<_>(always_wasm())?;
+		.execute()?;
 
 		let proof = proving_backend
 			.extract_proof()
@@ -627,11 +447,11 @@ mod execution {
 			exec,
 			method,
 			call_data,
-			Extensions::default(),
+			&mut Extensions::default(),
 			runtime_code,
 			CallContext::Offchain,
 		)
-		.execute_using_consensus_failure_handler(always_untrusted_wasm())
+		.execute()
 	}
 
 	/// Generate storage read proof.
@@ -1356,6 +1176,7 @@ mod tests {
 		let backend = trie_backend::tests::test_trie(state_version, None, None);
 		let mut overlayed_changes = Default::default();
 		let wasm_code = RuntimeCode::empty();
+		let mut execution_extensions = &mut Default::default();
 
 		let mut state_machine = StateMachine::new(
 			&backend,
@@ -1367,12 +1188,12 @@ mod tests {
 			},
 			"test",
 			&[],
-			Default::default(),
+			&mut execution_extensions,
 			&wasm_code,
 			CallContext::Offchain,
 		);
 
-		assert_eq!(state_machine.execute(ExecutionStrategy::NativeWhenPossible).unwrap(), vec![66]);
+		assert_eq!(state_machine.execute().unwrap(), vec![66]);
 	}
 
 	#[test]
@@ -1384,6 +1205,7 @@ mod tests {
 		let backend = trie_backend::tests::test_trie(state_version, None, None);
 		let mut overlayed_changes = Default::default();
 		let wasm_code = RuntimeCode::empty();
+		let mut execution_extensions = &mut Default::default();
 
 		let mut state_machine = StateMachine::new(
 			&backend,
@@ -1395,47 +1217,12 @@ mod tests {
 			},
 			"test",
 			&[],
-			Default::default(),
-			&wasm_code,
-			CallContext::Offchain,
-		);
-
-		assert_eq!(state_machine.execute(ExecutionStrategy::NativeElseWasm).unwrap(), vec![66]);
-	}
-
-	#[test]
-	fn dual_execution_strategy_detects_consensus_failure() {
-		dual_execution_strategy_detects_consensus_failure_inner(StateVersion::V0);
-		dual_execution_strategy_detects_consensus_failure_inner(StateVersion::V1);
-	}
-	fn dual_execution_strategy_detects_consensus_failure_inner(state_version: StateVersion) {
-		let mut consensus_failed = false;
-		let backend = trie_backend::tests::test_trie(state_version, None, None);
-		let mut overlayed_changes = Default::default();
-		let wasm_code = RuntimeCode::empty();
-
-		let mut state_machine = StateMachine::new(
-			&backend,
-			&mut overlayed_changes,
-			&DummyCodeExecutor {
-				native_available: true,
-				native_succeeds: true,
-				fallback_succeeds: false,
-			},
-			"test",
-			&[],
-			Default::default(),
+			&mut execution_extensions,
 			&wasm_code,
 			CallContext::Offchain,
 		);
 
-		assert!(state_machine
-			.execute_using_consensus_failure_handler(ExecutionManager::Both(|we, _ne| {
-				consensus_failed = true;
-				we
-			}),)
-			.is_err());
-		assert!(consensus_failed);
+		assert_eq!(state_machine.execute().unwrap(), vec![66]);
 	}
 
 	#[test]
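
With the strategy machinery removed, `execute()` is the single entry point and the executor decides internally how to run the code. A hedged call-site sketch; the helper name is made up, the generic bounds are assumed to match `StateMachine`'s own impl, and `codec` is the usual alias for `parity-scale-codec` (not part of the patch):

```rust
use codec::Codec;
use sp_core::{
	traits::{CallContext, CodeExecutor, RuntimeCode},
	Hasher,
};
use sp_externalities::Extensions;
use sp_state_machine::{Backend, OverlayedChanges, StateMachine};

/// Hypothetical helper: run `method` against `backend` without any execution strategy.
fn call_runtime<H, B, Exec>(
	backend: &B,
	exec: &Exec,
	method: &str,
	call_data: &[u8],
	runtime_code: &RuntimeCode,
) -> Result<Vec<u8>, String>
where
	H: Hasher,
	H::Out: Ord + Codec + 'static,
	B: Backend<H>,
	Exec: CodeExecutor + Clone + 'static,
{
	let mut overlay = OverlayedChanges::default();
	let mut extensions = Extensions::default();

	StateMachine::new(
		backend,
		&mut overlay,
		exec,
		method,
		call_data,
		&mut extensions,
		runtime_code,
		CallContext::Offchain,
	)
	.execute()
	.map_err(|e| format!("runtime call failed: {e}"))
}
```
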
diff --git a/substrate/scripts/ci/gitlab/pipeline/test.yml b/substrate/scripts/ci/gitlab/pipeline/test.yml
index 2aa67bbf44f..0a73e0b4114 100644
--- a/substrate/scripts/ci/gitlab/pipeline/test.yml
+++ b/substrate/scripts/ci/gitlab/pipeline/test.yml
@@ -115,8 +115,8 @@ cargo-check-benches:
       rusty-cachier cache upload
       ;;
       2)
-      cargo run --locked --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json
-      | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json
+      cargo run --locked --release -p node-bench -- ::node::import::sr25519::transfer_keep_alive::paritydb::small --json
+      | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::sr25519::transfer_keep_alive::paritydb::small.json
       ;;
       esac
 
@@ -305,7 +305,7 @@ quick-benchmarks:
     WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
   script:
     - rusty-cachier snapshot create
-    - time cargo run --locked --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1
+    - time cargo run --locked --release --features runtime-benchmarks -- benchmark pallet --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1
     - rusty-cachier cache upload
 
 test-frame-examples-compile-to-wasm:
diff --git a/substrate/scripts/run_all_benchmarks.sh b/substrate/scripts/run_all_benchmarks.sh
index 727b49e26af..83848100a7e 100755
--- a/substrate/scripts/run_all_benchmarks.sh
+++ b/substrate/scripts/run_all_benchmarks.sh
@@ -119,7 +119,6 @@ for PALLET in "${PALLETS[@]}"; do
     --repeat=20 \
     --pallet="$PALLET" \
     --extrinsic="*" \
-    --execution=wasm \
     --wasm-execution=compiled \
     --heap-pages=4096 \
     --output="$WEIGHT_FILE" \
@@ -137,7 +136,6 @@ echo "[+] Benchmarking block and extrinsic overheads..."
 OUTPUT=$(
   $SUBSTRATE benchmark overhead \
   --chain=dev \
-  --execution=wasm \
   --wasm-execution=compiled \
   --weight-path="./frame/support/src/weights/" \
   --header="./HEADER-APACHE2" \
diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs
index 94006fd9acb..90e15e0f8d5 100644
--- a/substrate/test-utils/client/src/lib.rs
+++ b/substrate/test-utils/client/src/lib.rs
@@ -22,10 +22,7 @@
 pub mod client_ext;
 
 pub use self::client_ext::{ClientBlockImportExt, ClientExt};
-pub use sc_client_api::{
-	execution_extensions::{ExecutionExtensions, ExecutionStrategies},
-	BadBlocks, ForkBlocks,
-};
+pub use sc_client_api::{execution_extensions::ExecutionExtensions, BadBlocks, ForkBlocks};
 pub use sc_client_db::{self, Backend, BlocksPruning};
 pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod, WasmExecutor};
 pub use sc_service::{client, RpcHandlers};
@@ -35,7 +32,6 @@ pub use sp_keyring::{
 };
 pub use sp_keystore::{Keystore, KeystorePtr};
 pub use sp_runtime::{Storage, StorageChild};
-pub use sp_state_machine::ExecutionStrategy;
 
 use futures::{future::Future, stream::StreamExt};
 use sc_client_api::BlockchainEvents;
@@ -67,14 +63,12 @@ impl GenesisInit for () {
 
 /// A builder for creating a test client instance.
 pub struct TestClientBuilder<Block: BlockT, ExecutorDispatch, Backend: 'static, G: GenesisInit> {
-	execution_strategies: ExecutionStrategies,
 	genesis_init: G,
 	/// The key is an unprefixed storage key, this only contains
 	/// default child trie content.
 	child_storage_extension: HashMap<Vec<u8>, StorageChild>,
 	backend: Arc<Backend>,
 	_executor: std::marker::PhantomData<ExecutorDispatch>,
-	keystore: Option<KeystorePtr>,
 	fork_blocks: ForkBlocks<Block>,
 	bad_blocks: BadBlocks<Block>,
 	enable_offchain_indexing_api: bool,
@@ -119,11 +113,9 @@ impl<Block: BlockT, ExecutorDispatch, Backend, G: GenesisInit>
 	pub fn with_backend(backend: Arc<Backend>) -> Self {
 		TestClientBuilder {
 			backend,
-			execution_strategies: ExecutionStrategies::default(),
 			child_storage_extension: Default::default(),
 			genesis_init: Default::default(),
 			_executor: Default::default(),
-			keystore: None,
 			fork_blocks: None,
 			bad_blocks: None,
 			enable_offchain_indexing_api: false,
@@ -131,12 +123,6 @@ impl<Block: BlockT, ExecutorDispatch, Backend, G: GenesisInit>
 		}
 	}
 
-	/// Set the keystore that should be used by the externalities.
-	pub fn set_keystore(mut self, keystore: KeystorePtr) -> Self {
-		self.keystore = Some(keystore);
-		self
-	}
-
 	/// Alter the genesis storage parameters.
 	pub fn genesis_init_mut(&mut self) -> &mut G {
 		&mut self.genesis_init
@@ -162,18 +148,6 @@ impl<Block: BlockT, ExecutorDispatch, Backend, G: GenesisInit>
 		self
 	}
 
-	/// Set the execution strategy that should be used by all contexts.
-	pub fn set_execution_strategy(mut self, execution_strategy: ExecutionStrategy) -> Self {
-		self.execution_strategies = ExecutionStrategies {
-			syncing: execution_strategy,
-			importing: execution_strategy,
-			block_construction: execution_strategy,
-			offchain_worker: execution_strategy,
-			other: execution_strategy,
-		};
-		self
-	}
-
 	/// Sets custom block rules.
 	pub fn set_block_rules(
 		mut self,
@@ -296,12 +270,7 @@ impl<Block: BlockT, D, Backend, G: GenesisInit>
 			self.backend.clone(),
 			executor.clone(),
 			Default::default(),
-			ExecutionExtensions::new(
-				self.execution_strategies.clone(),
-				self.keystore.clone(),
-				sc_offchain::OffchainDb::factory_from_backend(&*self.backend),
-				Arc::new(executor),
-			),
+			ExecutionExtensions::new(None, Arc::new(executor)),
 		)
 		.expect("Creates LocalCallExecutor");
 
diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs
index f1289ce41a5..ed27bce0b35 100644
--- a/substrate/test-utils/runtime/src/genesismap.rs
+++ b/substrate/test-utils/runtime/src/genesismap.rs
@@ -138,10 +138,9 @@ impl GenesisStorageBuilder {
 			.build_storage()
 			.expect("Build storage from substrate-test-runtime GenesisConfig");
 
-		storage.top.insert(
-			well_known_keys::HEAP_PAGES.into(),
-			self.heap_pages_override.unwrap_or(16_u64).encode(),
-		);
+		if let Some(heap_pages) = self.heap_pages_override {
+			storage.top.insert(well_known_keys::HEAP_PAGES.into(), heap_pages.encode());
+		}
 
 		storage.top.extend(self.extra_storage.top.clone());
 		storage.children_default.extend(self.extra_storage.children_default.clone());
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index d02b378e154..8beed61d3d3 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -845,8 +845,12 @@ pub mod storage_key_generator {
 
 	/// Generate the hashed storage keys from the raw literals. These keys are expected to be in
 	/// storage with the given substrate-test runtime.
-	pub fn generate_expected_storage_hashed_keys() -> Vec<String> {
-		let literals: Vec<&[u8]> = vec![b":code", b":extrinsic_index", b":heappages"];
+	pub fn generate_expected_storage_hashed_keys(custom_heap_pages: bool) -> Vec<String> {
+		let mut literals: Vec<&[u8]> = vec![b":code", b":extrinsic_index"];
+
+		if custom_heap_pages {
+			literals.push(b":heappages");
+		}
 
 		let keys: Vec<Vec<&[u8]>> = vec![
 			vec![b"Babe", b"Authorities"],
@@ -906,8 +910,11 @@ pub mod storage_key_generator {
 	/// that would be generated by `generate_expected_storage_hashed_keys`. This list is provided
 	/// for the debugging convenience only. Value of each hex-string is documented with the literal
 	/// origin.
-	pub fn get_expected_storage_hashed_keys() -> Vec<String> {
-		[
+	///
+	/// `custom_heap_pages`: Should be set to `true` when the state contains the `:heappages` key,
+	/// i.e. when the heap pages used by the executor are overridden.
+	pub fn get_expected_storage_hashed_keys(custom_heap_pages: bool) -> Vec<&'static str> {
+		let mut res = vec![
 			//System|:__STORAGE_VERSION__:
 			"00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429",
 			//SubstrateTest|Authorities
@@ -977,20 +984,25 @@ pub mod storage_key_generator {
 			"3a636f6465",
 			// :extrinsic_index
 			"3a65787472696e7369635f696e646578",
-			// :heappages
-			"3a686561707061676573",
 			// Balances|:__STORAGE_VERSION__:
 			"c2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429",
 			// Balances|TotalIssuance
 			"c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80",
-		].into_iter().map(String::from).collect::<Vec<_>>()
+		];
+
+		if custom_heap_pages {
+			// :heappages
+			res.push("3a686561707061676573");
+		}
+
+		res
 	}
 
 	#[test]
 	fn expected_keys_vec_are_matching() {
 		assert_eq!(
-			storage_key_generator::get_expected_storage_hashed_keys(),
-			storage_key_generator::generate_expected_storage_hashed_keys(),
+			storage_key_generator::get_expected_storage_hashed_keys(false),
+			storage_key_generator::generate_expected_storage_hashed_keys(false),
 		);
 	}
 }
@@ -1001,15 +1013,14 @@ mod tests {
 	use codec::Encode;
 	use frame_support::dispatch::DispatchInfo;
 	use sc_block_builder::BlockBuilderProvider;
-	use sp_api::ProvideRuntimeApi;
+	use sp_api::{ApiExt, ProvideRuntimeApi};
 	use sp_consensus::BlockOrigin;
-	use sp_core::{storage::well_known_keys::HEAP_PAGES, ExecutionContext};
+	use sp_core::{storage::well_known_keys::HEAP_PAGES, traits::CallContext};
 	use sp_keyring::AccountKeyring;
 	use sp_runtime::{
 		traits::{Hash as _, SignedExtension},
 		transaction_validity::{InvalidTransaction, ValidTransaction},
 	};
-	use sp_state_machine::ExecutionStrategy;
 	use substrate_test_runtime_client::{
 		prelude::*, runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder,
 	};
@@ -1019,20 +1030,15 @@ mod tests {
 		// This tests that the on-chain `HEAP_PAGES` parameter is respected.
 
 		// Create a client devoting only 8 pages of wasm memory. This gives us ~512k of heap memory.
-		let mut client = TestClientBuilder::new()
-			.set_execution_strategy(ExecutionStrategy::AlwaysWasm)
-			.set_heap_pages(8)
-			.build();
+		let mut client = TestClientBuilder::new().set_heap_pages(8).build();
 		let best_hash = client.chain_info().best_hash;
 
 		// Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger
 		// than the heap.
-		let ret = client.runtime_api().vec_with_capacity_with_context(
-			best_hash,
-			// Use `BlockImport` to ensure we use the on chain heap pages as configured above.
-			ExecutionContext::Importing,
-			1048576,
-		);
+		let mut runtime_api = client.runtime_api();
+		// Use `CallContext::Onchain` so that the on-chain heap pages configured above are respected.
+		runtime_api.set_call_context(CallContext::Onchain);
+		let ret = runtime_api.vec_with_capacity(best_hash, 1048576);
 		assert!(ret.is_err());
 
 		// Create a block that sets the `:heap_pages` to 32 pages of memory which corresponds to
@@ -1054,8 +1060,7 @@ mod tests {
 
 	#[test]
 	fn test_storage() {
-		let client =
-			TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build();
+		let client = TestClientBuilder::new().build();
 		let runtime_api = client.runtime_api();
 		let best_hash = client.chain_info().best_hash;
 
@@ -1080,8 +1085,7 @@ mod tests {
 		let backend =
 			sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build();
 		let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap();
-		let client =
-			TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build();
+		let client = TestClientBuilder::new().build();
 		let runtime_api = client.runtime_api();
 		let best_hash = client.chain_info().best_hash;
 
@@ -1108,7 +1112,7 @@ mod tests {
 				.cloned()
 				.map(storage_key_generator::hex)
 				.collect::<Vec<_>>(),
-			storage_key_generator::get_expected_storage_hashed_keys()
+			storage_key_generator::get_expected_storage_hashed_keys(false)
 		);
 	}
 
diff --git a/substrate/test-utils/runtime/src/substrate_test_pallet.rs b/substrate/test-utils/runtime/src/substrate_test_pallet.rs
index 0864c952b70..d080caa0898 100644
--- a/substrate/test-utils/runtime/src/substrate_test_pallet.rs
+++ b/substrate/test-utils/runtime/src/substrate_test_pallet.rs
@@ -24,7 +24,7 @@
 use frame_support::{pallet_prelude::*, storage};
 use sp_core::sr25519::Public;
 use sp_runtime::{
-	traits::{BlakeTwo256, Hash},
+	traits::Hash,
 	transaction_validity::{
 		InvalidTransaction, TransactionSource, TransactionValidity, ValidTransaction,
 	},
@@ -41,7 +41,7 @@ pub mod pallet {
 	use crate::TransferData;
 	use frame_system::pallet_prelude::*;
 	use sp_core::storage::well_known_keys;
-	use sp_runtime::{transaction_validity::TransactionPriority, Perbill};
+	use sp_runtime::{traits::BlakeTwo256, transaction_validity::TransactionPriority, Perbill};
 
 	#[pallet::pallet]
 	#[pallet::without_storage_info]
diff --git a/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs
index 0192372fa33..90b71cd78c2 100644
--- a/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/block/cmd.rs
@@ -39,12 +39,12 @@ use super::bench::{Benchmark, BenchmarkParams};
 /// did not use more weight than declared which would otherwise be an issue.
 /// To test this with a dev node, first create one with a temp directory:
 ///
-/// $ substrate --dev -d /tmp/my-dev --execution wasm --wasm-execution compiled
+/// $ substrate --dev -d /tmp/my-dev --wasm-execution compiled
 ///
 /// And wait some time to let it produce 3 blocks. Then benchmark them with:
 ///
 /// $ substrate benchmark-block --from 1 --to 3 --dev -d /tmp/my-dev
-///   --execution wasm --wasm-execution compiled --pruning archive
+///   --wasm-execution compiled --pruning archive
 ///
 /// The output will be similar to this:
 ///
diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/README.md b/substrate/utils/frame/benchmarking-cli/src/overhead/README.md
index 85bcc7fa36f..390bc09e417 100644
--- a/substrate/utils/frame/benchmarking-cli/src/overhead/README.md
+++ b/substrate/utils/frame/benchmarking-cli/src/overhead/README.md
@@ -103,12 +103,12 @@ Writing weights to "extrinsic_weights.rs"
 
 The complete command for Polkadot looks like this:
 ```sh
-cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --execution=wasm --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/
+cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/
 ```
 
 This will overwrite the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) files in the Polkadot runtime directory.
 You can try the same for *Rococo* and see that the results differ slightly.
-👉 It is paramount to use `--profile=production`, `--execution=wasm` and `--wasm-execution=compiled` as the results are otherwise useless.
+👉 It is paramount to use `--profile=production` and `--wasm-execution=compiled` as the results are otherwise useless.
 
 ## Output Interpretation
 
@@ -122,7 +122,6 @@ Minimizing this is important to have a large transaction throughput.
 - `--weight-path` Set the output directory or file to write the weights to.
 - `--repeat` Set the repetitions of both benchmarks.
 - `--warmup` Set the rounds of warmup before measuring.
-- `--execution` Should be set to `wasm` for correct results.
 - `--wasm-execution` Should be set to `compiled` for correct results.
 - [`--mul`](../shared/README.md#arguments)
 - [`--add`](../shared/README.md#arguments)
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
index 7f587d9c7fa..1d2d81e3577 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -23,9 +23,7 @@ use frame_benchmarking::{
 };
 use frame_support::traits::StorageInfo;
 use linked_hash_map::LinkedHashMap;
-use sc_cli::{
-	execution_method_from_cli, CliConfiguration, ExecutionStrategy, Result, SharedParams,
-};
+use sc_cli::{execution_method_from_cli, CliConfiguration, Result, SharedParams};
 use sc_client_db::BenchmarkingState;
 use sc_executor::WasmExecutor;
 use sc_service::Configuration;
@@ -182,7 +180,6 @@ impl PalletCmd {
 		}
 
 		let spec = config.chain_spec;
-		let strategy = self.execution.unwrap_or(ExecutionStrategy::Wasm);
 		let pallet = self.pallet.clone().unwrap_or_default();
 		let pallet = pallet.as_bytes();
 		let extrinsic = self.extrinsic.clone().unwrap_or_default();
@@ -243,11 +240,11 @@ impl PalletCmd {
 			&executor,
 			"Benchmark_benchmark_metadata",
 			&(self.extra).encode(),
-			extensions(),
+			&mut extensions(),
 			&sp_state_machine::backend::BackendRuntimeCode::new(state).runtime_code()?,
 			CallContext::Offchain,
 		)
-		.execute(strategy.into())
+		.execute()
 		.map_err(|e| format!("{}: {}", ERROR_METADATA_NOT_FOUND, e))?;
 
 		let (list, storage_info) =
@@ -379,12 +376,12 @@ impl PalletCmd {
 							1,    // no need to do internal repeats
 						)
 							.encode(),
-						extensions(),
+						&mut extensions(),
 						&sp_state_machine::backend::BackendRuntimeCode::new(state)
 							.runtime_code()?,
 						CallContext::Offchain,
 					)
-					.execute(strategy.into())
+					.execute()
 					.map_err(|e| {
 						format!("Error executing and verifying runtime benchmark: {}", e)
 					})?;
@@ -419,12 +416,12 @@ impl PalletCmd {
 							self.repeat,
 						)
 							.encode(),
-						extensions(),
+						&mut extensions(),
 						&sp_state_machine::backend::BackendRuntimeCode::new(state)
 							.runtime_code()?,
 						CallContext::Offchain,
 					)
-					.execute(strategy.into())
+					.execute()
 					.map_err(|e| format!("Error executing runtime benchmark: {}", e))?;
 
 					let batch =
@@ -451,12 +448,12 @@ impl PalletCmd {
 							self.repeat,
 						)
 							.encode(),
-						extensions(),
+						&mut extensions(),
 						&sp_state_machine::backend::BackendRuntimeCode::new(state)
 							.runtime_code()?,
 						CallContext::Offchain,
 					)
-					.execute(strategy.into())
+					.execute()
 					.map_err(|e| format!("Error executing runtime benchmark: {}", e))?;
 
 					let batch =
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
index ca87ddebb32..5090a601f0c 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
@@ -20,8 +20,8 @@ mod writer;
 
 use crate::shared::HostInfoParams;
 use sc_cli::{
-	ExecutionStrategy, WasmExecutionMethod, WasmtimeInstantiationStrategy,
-	DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD,
+	WasmExecutionMethod, WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY,
+	DEFAULT_WASM_EXECUTION_METHOD,
 };
 use std::{fmt::Debug, path::PathBuf};
 
@@ -129,10 +129,6 @@ pub struct PalletCmd {
 	#[clap(flatten)]
 	pub shared_params: sc_cli::SharedParams,
 
-	/// The execution strategy that should be used for benchmarks.
-	#[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)]
-	pub execution: Option<ExecutionStrategy>,
-
 	/// Method for executing Wasm runtime code.
 	#[arg(
 		long = "wasm-execution",
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs
index fd1b92b90c1..69c95d13c09 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/writer.rs
@@ -90,7 +90,6 @@ struct CmdData {
 	repeat: u32,
 	lowest_range_values: Vec<u32>,
 	highest_range_values: Vec<u32>,
-	execution: String,
 	wasm_execution: String,
 	chain: String,
 	db_cache: u32,
@@ -425,7 +424,6 @@ pub(crate) fn write_results(
 		repeat: cmd.repeat,
 		lowest_range_values: cmd.lowest_range_values.clone(),
 		highest_range_values: cmd.highest_range_values.clone(),
-		execution: format!("{:?}", cmd.execution),
 		wasm_execution: cmd.wasm_method.to_string(),
 		chain: format!("{:?}", cmd.shared_params.chain),
 		db_cache: cmd.database_cache_size,
diff --git a/substrate/utils/frame/try-runtime/cli/src/lib.rs b/substrate/utils/frame/try-runtime/cli/src/lib.rs
index fb4a644e200..fbc55ad1dce 100644
--- a/substrate/utils/frame/try-runtime/cli/src/lib.rs
+++ b/substrate/utils/frame/try-runtime/cli/src/lib.rs
@@ -858,7 +858,7 @@ pub(crate) fn state_machine_call<Block: BlockT, HostFns: HostFunctions>(
 	executor: &WasmExecutor<HostFns>,
 	method: &'static str,
 	data: &[u8],
-	extensions: Extensions,
+	mut extensions: Extensions,
 ) -> sc_cli::Result<(OverlayedChanges, Vec<u8>)> {
 	let mut changes = Default::default();
 	let encoded_results = StateMachine::new(
@@ -867,11 +867,11 @@ pub(crate) fn state_machine_call<Block: BlockT, HostFns: HostFunctions>(
 		executor,
 		method,
 		data,
-		extensions,
+		&mut extensions,
 		&sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?,
 		CallContext::Offchain,
 	)
-	.execute(sp_state_machine::ExecutionStrategy::AlwaysWasm)
+	.execute()
 	.map_err(|e| format!("failed to execute '{}': {}", method, e))
 	.map_err::<sc_cli::Error, _>(Into::into)?;
 
@@ -887,7 +887,7 @@ pub(crate) fn state_machine_call_with_proof<Block: BlockT, HostFns: HostFunction
 	executor: &WasmExecutor<HostFns>,
 	method: &'static str,
 	data: &[u8],
-	extensions: Extensions,
+	mut extensions: Extensions,
 	maybe_export_proof: Option<PathBuf>,
 ) -> sc_cli::Result<(OverlayedChanges, Vec<u8>)> {
 	use parity_scale_codec::Encode;
@@ -906,11 +906,11 @@ pub(crate) fn state_machine_call_with_proof<Block: BlockT, HostFns: HostFunction
 		executor,
 		method,
 		data,
-		extensions,
+		&mut extensions,
 		&runtime_code,
 		CallContext::Offchain,
 	)
-	.execute(sp_state_machine::ExecutionStrategy::AlwaysWasm)
+	.execute()
 	.map_err(|e| format!("failed to execute {}: {}", method, e))
 	.map_err::<sc_cli::Error, _>(Into::into)?;
 
-- 
GitLab