diff --git a/cumulus/polkadot-parachain/src/cli.rs b/cumulus/polkadot-parachain/src/cli.rs
index 7c01e34f9a03ccd8e7d92cc9ec719f7fdd172829..d06354dda22057fda87bb21e47340e38835abb64 100644
--- a/cumulus/polkadot-parachain/src/cli.rs
+++ b/cumulus/polkadot-parachain/src/cli.rs
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
+use crate::common::NodeExtraArgs;
 use clap::{Command, CommandFactory, FromArgMatches};
 use sc_cli::SubstrateCli;
 use std::path::PathBuf;
@@ -94,6 +95,12 @@ pub struct Cli {
 	pub relay_chain_args: Vec<String>,
 }
 
+impl Cli {
+	pub(crate) fn node_extra_args(&self) -> NodeExtraArgs {
+		NodeExtraArgs { use_slot_based_consensus: self.experimental_use_slot_based }
+	}
+}
+
 #[derive(Debug)]
 pub struct RelayChainCli {
 	/// The actual relay chain cli object.
diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs
index 323216f300d85773589fb68ecb681ffc50c364e9..fcf6c06f42227d5f8b9bb4d0c50686f699854511 100644
--- a/cumulus/polkadot-parachain/src/command.rs
+++ b/cumulus/polkadot-parachain/src/command.rs
@@ -14,15 +14,20 @@
 // You should have received a copy of the GNU General Public License
 // along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
+#[cfg(feature = "runtime-benchmarks")]
+use crate::service::Block;
 use crate::{
 	chain_spec,
 	chain_spec::GenericChainSpec,
 	cli::{Cli, RelayChainCli, Subcommand},
+	common::NodeExtraArgs,
 	fake_runtime_api::{
-		asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, aura::RuntimeApi,
+		asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi,
+		aura::RuntimeApi as AuraRuntimeApi,
 	},
-	service::{new_partial, Block, Hash},
+	service::{new_aura_node_spec, DynNodeSpec, ShellNode},
 };
+#[cfg(feature = "runtime-benchmarks")]
 use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions;
 use cumulus_primitives_core::ParaId;
 use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE};
@@ -34,6 +39,8 @@ use sc_cli::{
 };
 use sc_service::config::{BasePath, PrometheusConfig};
 use sp_runtime::traits::AccountIdConversion;
+#[cfg(feature = "runtime-benchmarks")]
+use sp_runtime::traits::HashingFor;
 use std::{net::SocketAddr, path::PathBuf};
 
 /// The choice of consensus for the parachain omni-node.
@@ -110,6 +117,7 @@ fn runtime(id: &str) -> Runtime {
 	} else if id.starts_with("asset-hub-kusama") |
 		id.starts_with("statemine") |
 		id.starts_with("asset-hub-rococo") |
+		id.starts_with("rockmine") |
 		id.starts_with("asset-hub-westend") |
 		id.starts_with("westmint")
 	{
@@ -378,146 +386,27 @@ impl SubstrateCli for RelayChainCli {
 	}
 }
 
-/// Creates partial components for the runtimes that are supported by the benchmarks.
-macro_rules! construct_partials {
-	($config:expr, |$partials:ident| $code:expr) => {
-		match $config.chain_spec.runtime()? {
-			Runtime::AssetHubPolkadot => {
-				let $partials = new_partial::<AssetHubPolkadotRuntimeApi, _>(
-					&$config,
-					crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>,
-				)?;
-				$code
-			},
-			Runtime::AssetHub |
-			Runtime::BridgeHub(_) |
-			Runtime::Collectives |
-			Runtime::Coretime(_) |
-			Runtime::People(_) => {
-				let $partials = new_partial::<RuntimeApi, _>(
-					&$config,
-					crate::service::build_relay_to_aura_import_queue::<_, AuraId>,
-				)?;
-				$code
-			},
-			Runtime::Glutton | Runtime::Shell | Runtime::Seedling => {
-				let $partials = new_partial::<RuntimeApi, _>(
-					&$config,
-					crate::service::build_shell_import_queue,
-				)?;
-				$code
-			},
-			Runtime::ContractsRococo | Runtime::Penpal(_) => {
-				let $partials = new_partial::<RuntimeApi, _>(
-					&$config,
-					crate::service::build_aura_import_queue,
-				)?;
-				$code
-			},
-			Runtime::Omni(consensus) => match consensus {
-				Consensus::Aura => {
-					let $partials = new_partial::<RuntimeApi, _>(
-						&$config,
-						crate::service::build_aura_import_queue,
-					)?;
-					$code
-				},
-				Consensus::Relay => {
-					let $partials = new_partial::<RuntimeApi, _>(
-						&$config,
-						crate::service::build_shell_import_queue,
-					)?;
-					$code
-				},
-			},
-		}
-	};
-}
-
-macro_rules! construct_async_run {
-	(|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{
-		let runner = $cli.create_runner($cmd)?;
-		match runner.config().chain_spec.runtime()? {
-			Runtime::AssetHubPolkadot => {
-				runner.async_run(|$config| {
-					let $components = new_partial::<AssetHubPolkadotRuntimeApi, _>(
-						&$config,
-						crate::service::build_relay_to_aura_import_queue::<_, AssetHubPolkadotAuraId>,
-					)?;
-					let task_manager = $components.task_manager;
-					{ $( $code )* }.map(|v| (v, task_manager))
-				})
-			},
-			Runtime::AssetHub |
-			Runtime::BridgeHub(_) |
-			Runtime::Collectives |
-			Runtime::Coretime(_) |
-			Runtime::People(_) => {
-				runner.async_run(|$config| {
-					let $components = new_partial::<RuntimeApi, _>(
-						&$config,
-						crate::service::build_relay_to_aura_import_queue::<_, AuraId>,
-					)?;
-					let task_manager = $components.task_manager;
-					{ $( $code )* }.map(|v| (v, task_manager))
-				})
-			},
-			Runtime::Shell |
-			Runtime::Seedling |
-			Runtime::Glutton => {
-				runner.async_run(|$config| {
-					let $components = new_partial::<RuntimeApi, _>(
-						&$config,
-						crate::service::build_shell_import_queue,
-					)?;
-					let task_manager = $components.task_manager;
-					{ $( $code )* }.map(|v| (v, task_manager))
-				})
-			}
-			Runtime::ContractsRococo | Runtime::Penpal(_) => {
-				runner.async_run(|$config| {
-					let $components = new_partial::<
-						RuntimeApi,
-						_,
-					>(
-						&$config,
-						crate::service::build_aura_import_queue,
-					)?;
-					let task_manager = $components.task_manager;
-					{ $( $code )* }.map(|v| (v, task_manager))
-				})
-			},
-			Runtime::Omni(consensus) => match consensus {
-				Consensus::Aura => {
-					runner.async_run(|$config| {
-						let $components = new_partial::<
-							RuntimeApi,
-							_,
-						>(
-							&$config,
-							crate::service::build_aura_import_queue,
-						)?;
-						let task_manager = $components.task_manager;
-						{ $( $code )* }.map(|v| (v, task_manager))
-					})
-				},
-				Consensus::Relay
-				 => {
-					runner.async_run(|$config| {
-						let $components = new_partial::<
-							RuntimeApi,
-							_,
-						>(
-							&$config,
-							crate::service::build_shell_import_queue,
-						)?;
-						let task_manager = $components.task_manager;
-						{ $( $code )* }.map(|v| (v, task_manager))
-					})
-				},
-			}
-		}
-	}}
+fn new_node_spec(
+	config: &sc_service::Configuration,
+	extra_args: NodeExtraArgs,
+) -> std::result::Result<Box<dyn DynNodeSpec>, sc_cli::Error> {
+	Ok(match config.chain_spec.runtime()? {
+		Runtime::AssetHubPolkadot =>
+			new_aura_node_spec::<AssetHubPolkadotRuntimeApi, AssetHubPolkadotAuraId>(extra_args),
+		Runtime::AssetHub |
+		Runtime::BridgeHub(_) |
+		Runtime::Collectives |
+		Runtime::Coretime(_) |
+		Runtime::People(_) |
+		Runtime::ContractsRococo |
+		Runtime::Glutton |
+		Runtime::Penpal(_) => new_aura_node_spec::<AuraRuntimeApi, AuraId>(extra_args),
+		Runtime::Shell | Runtime::Seedling => Box::new(ShellNode),
+		Runtime::Omni(consensus) => match consensus {
+			Consensus::Aura => new_aura_node_spec::<AuraRuntimeApi, AuraId>(extra_args),
+			Consensus::Relay => Box::new(ShellNode),
+		},
+	})
 }
 
 /// Parse command line arguments into service configuration.
@@ -530,28 +419,40 @@ pub fn run() -> Result<()> {
 			runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
 		},
 		Some(Subcommand::CheckBlock(cmd)) => {
-			construct_async_run!(|components, cli, cmd, config| {
-				Ok(cmd.run(components.client, components.import_queue))
+			let runner = cli.create_runner(cmd)?;
+			runner.async_run(|config| {
+				let node = new_node_spec(&config, cli.node_extra_args())?;
+				node.prepare_check_block_cmd(config, cmd)
 			})
 		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
-			construct_async_run!(|components, cli, cmd, config| {
-				Ok(cmd.run(components.client, config.database))
+			let runner = cli.create_runner(cmd)?;
+			runner.async_run(|config| {
+				let node = new_node_spec(&config, cli.node_extra_args())?;
+				node.prepare_export_blocks_cmd(config, cmd)
 			})
 		},
 		Some(Subcommand::ExportState(cmd)) => {
-			construct_async_run!(|components, cli, cmd, config| {
-				Ok(cmd.run(components.client, config.chain_spec))
+			let runner = cli.create_runner(cmd)?;
+			runner.async_run(|config| {
+				let node = new_node_spec(&config, cli.node_extra_args())?;
+				node.prepare_export_state_cmd(config, cmd)
 			})
 		},
 		Some(Subcommand::ImportBlocks(cmd)) => {
-			construct_async_run!(|components, cli, cmd, config| {
-				Ok(cmd.run(components.client, components.import_queue))
+			let runner = cli.create_runner(cmd)?;
+			runner.async_run(|config| {
+				let node = new_node_spec(&config, cli.node_extra_args())?;
+				node.prepare_import_blocks_cmd(config, cmd)
+			})
+		},
+		Some(Subcommand::Revert(cmd)) => {
+			let runner = cli.create_runner(cmd)?;
+			runner.async_run(|config| {
+				let node = new_node_spec(&config, cli.node_extra_args())?;
+				node.prepare_revert_cmd(config, cmd)
 			})
 		},
-		Some(Subcommand::Revert(cmd)) => construct_async_run!(|components, cli, cmd, config| {
-			Ok(cmd.run(components.client, components.backend, None))
-		}),
 		Some(Subcommand::PurgeChain(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			let polkadot_cli = RelayChainCli::new(runner.config(), cli.relay_chain_args.iter());
@@ -569,8 +470,10 @@ pub fn run() -> Result<()> {
 		},
 		Some(Subcommand::ExportGenesisHead(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner
-				.sync_run(|config| construct_partials!(config, |partials| cmd.run(partials.client)))
+			runner.sync_run(|config| {
+				let node = new_node_spec(&config, cli.node_extra_args())?;
+				node.run_export_genesis_head_cmd(config, cmd)
+			})
 		},
 		Some(Subcommand::ExportGenesisWasm(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
@@ -584,40 +487,28 @@ pub fn run() -> Result<()> {
 
 			// Switch on the concrete benchmark sub-command-
 			match cmd {
-				BenchmarkCmd::Pallet(cmd) =>
-					if cfg!(feature = "runtime-benchmarks") {
-						runner.sync_run(|config| cmd.run_with_spec::<sp_runtime::traits::HashingFor<Block>, ReclaimHostFunctions>(Some(config.chain_spec)))
-					} else {
-						Err("Benchmarking wasn't enabled when building the node. \
-				You can enable it with `--features runtime-benchmarks`."
-							.into())
-					},
+				#[cfg(feature = "runtime-benchmarks")]
+				BenchmarkCmd::Pallet(cmd) => runner.sync_run(|config| {
+					cmd.run_with_spec::<HashingFor<Block>, ReclaimHostFunctions>(Some(
+						config.chain_spec,
+					))
+				}),
 				BenchmarkCmd::Block(cmd) => runner.sync_run(|config| {
-					construct_partials!(config, |partials| cmd.run(partials.client))
+					let node = new_node_spec(&config, cli.node_extra_args())?;
+					node.run_benchmark_block_cmd(config, cmd)
 				}),
-				#[cfg(not(feature = "runtime-benchmarks"))]
-				BenchmarkCmd::Storage(_) =>
-					return Err(sc_cli::Error::Input(
-						"Compile with --features=runtime-benchmarks \
-						to enable storage benchmarks."
-							.into(),
-					)
-					.into()),
 				#[cfg(feature = "runtime-benchmarks")]
 				BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| {
-					construct_partials!(config, |partials| {
-						let db = partials.backend.expose_db();
-						let storage = partials.backend.expose_storage();
-
-						cmd.run(config, partials.client.clone(), db, storage)
-					})
+					let node = new_node_spec(&config, cli.node_extra_args())?;
+					node.run_benchmark_storage_cmd(config, cmd)
 				}),
 				BenchmarkCmd::Machine(cmd) =>
 					runner.sync_run(|config| cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())),
-				// NOTE: this allows the Client to leniently implement
-				// new benchmark commands without requiring a companion MR.
 				#[allow(unreachable_patterns)]
-				_ => Err("Benchmarking sub-command unsupported".into()),
+				_ => Err("Benchmarking sub-command unsupported or compilation feature missing. \
+					Make sure to compile with --features=runtime-benchmarks \
+					to enable all supported benchmarks."
+					.into()),
 			}
 		},
 		Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?),
@@ -645,25 +536,33 @@ pub fn run() -> Result<()> {
 
 					if old_path.exists() && new_path.exists() {
 						return Err(format!(
-							"Found legacy {} path {} and new asset-hub path {}. Delete one path such that only one exists.",
-							old_name, old_path.display(), new_path.display()
-						).into())
+							"Found legacy {} path {} and new Asset Hub path {}. \
+							Delete one path such that only one exists.",
+							old_name,
+							old_path.display(),
+							new_path.display()
+						)
+						.into())
 					}
 
 					if old_path.exists() {
 						std::fs::rename(old_path.clone(), new_path.clone())?;
 						info!(
-							"Statemint renamed to Asset Hub. The filepath with associated data on disk has been renamed from {} to {}.",
-							old_path.display(), new_path.display()
+							"{} was renamed to Asset Hub. The filepath with associated data on disk \
+							has been renamed from {} to {}.",
+							old_name,
+							old_path.display(),
+							new_path.display()
 						);
 					}
 				}
 
-				let hwbench = (!cli.no_hardware_benchmarks).then_some(
-					config.database.path().map(|database_path| {
+				let hwbench = (!cli.no_hardware_benchmarks)
+					.then_some(config.database.path().map(|database_path| {
 						let _ = std::fs::create_dir_all(database_path);
 						sc_sysinfo::gather_hwbench(Some(database_path))
-					})).flatten();
+					}))
+					.flatten();
 
 				let para_id = chain_spec::Extensions::try_get(&*config.chain_spec)
 					.map(|e| e.para_id)
@@ -672,7 +571,9 @@ pub fn run() -> Result<()> {
 				let id = ParaId::from(para_id);
 
 				let parachain_account =
-					AccountIdConversion::<polkadot_primitives::AccountId>::into_account_truncating(&id);
+					AccountIdConversion::<polkadot_primitives::AccountId>::into_account_truncating(
+						&id,
+					);
 
 				let tokio_handle = config.tokio_handle.clone();
 				let polkadot_config =
@@ -683,209 +584,34 @@ pub fn run() -> Result<()> {
 				info!("🧾 Parachain Account: {}", parachain_account);
 				info!("✍️ Is collating: {}", if config.role.is_authority() { "yes" } else { "no" });
 
-				match config.network.network_backend {
-					sc_network::config::NetworkBackendType::Libp2p =>
-						start_node::<sc_network::NetworkWorker<_, _>>(
-							config,
-							polkadot_config,
-							collator_options,
-							id,
-							cli.experimental_use_slot_based,
-							hwbench,
-						)
-						.await,
-					sc_network::config::NetworkBackendType::Litep2p =>
-						start_node::<sc_network::Litep2pNetworkBackend>(
-							config,
-							polkadot_config,
-							collator_options,
-							id,
-							cli.experimental_use_slot_based,
-							hwbench,
-						)
-						.await,
-				}
+				start_node(
+					config,
+					polkadot_config,
+					collator_options,
+					id,
+					cli.node_extra_args(),
+					hwbench,
+				)
+				.await
 			})
 		},
 	}
 }
 
-async fn start_node<Network: sc_network::NetworkBackend<Block, Hash>>(
+#[sc_tracing::logging::prefix_logs_with("Parachain")]
+async fn start_node(
 	config: sc_service::Configuration,
 	polkadot_config: sc_service::Configuration,
 	collator_options: cumulus_client_cli::CollatorOptions,
 	id: ParaId,
-	use_experimental_slot_based: bool,
+	extra_args: NodeExtraArgs,
 	hwbench: Option<sc_sysinfo::HwBench>,
 ) -> Result<sc_service::TaskManager> {
-	match config.chain_spec.runtime()? {
-		Runtime::AssetHubPolkadot =>
-			crate::service::start_asset_hub_async_backing_node::<
-				AssetHubPolkadotRuntimeApi,
-				AssetHubPolkadotAuraId,
-				Network,
-			>(config, polkadot_config, collator_options, id, use_experimental_slot_based, hwbench)
-			.await
-			.map(|r| r.0)
-			.map_err(Into::into),
-
-		Runtime::AssetHub | Runtime::Collectives =>
-			crate::service::start_generic_aura_async_backing_node::<Network>(
-				config,
-				polkadot_config,
-				collator_options,
-				id,
-				use_experimental_slot_based,
-				hwbench,
-			)
-			.await
-			.map(|r| r.0)
-			.map_err(Into::into),
-
-		Runtime::Seedling | Runtime::Shell => crate::service::start_shell_node::<Network>(
-			config,
-			polkadot_config,
-			collator_options,
-			id,
-			hwbench,
-		)
+	let node_spec = new_node_spec(&config, extra_args)?;
+	node_spec
+		.start_node(config, polkadot_config, collator_options, id, hwbench)
 		.await
-		.map(|r| r.0)
-		.map_err(Into::into),
-
-		Runtime::ContractsRococo => crate::service::start_contracts_rococo_node::<Network>(
-			config,
-			polkadot_config,
-			collator_options,
-			id,
-			use_experimental_slot_based,
-			hwbench,
-		)
-		.await
-		.map(|r| r.0)
-		.map_err(Into::into),
-
-		Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type {
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal |
-			chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment =>
-				crate::service::start_generic_aura_async_backing_node::<Network>(
-					config,
-					polkadot_config,
-					collator_options,
-					id,
-					use_experimental_slot_based,
-					hwbench,
-				)
-				.await
-				.map(|r| r.0),
-		}
-		.map_err(Into::into),
-
-		Runtime::Coretime(coretime_runtime_type) => match coretime_runtime_type {
-			chain_spec::coretime::CoretimeRuntimeType::Kusama |
-			chain_spec::coretime::CoretimeRuntimeType::KusamaLocal |
-			chain_spec::coretime::CoretimeRuntimeType::Polkadot |
-			chain_spec::coretime::CoretimeRuntimeType::PolkadotLocal |
-			chain_spec::coretime::CoretimeRuntimeType::Rococo |
-			chain_spec::coretime::CoretimeRuntimeType::RococoLocal |
-			chain_spec::coretime::CoretimeRuntimeType::RococoDevelopment |
-			chain_spec::coretime::CoretimeRuntimeType::Westend |
-			chain_spec::coretime::CoretimeRuntimeType::WestendLocal |
-			chain_spec::coretime::CoretimeRuntimeType::WestendDevelopment =>
-				crate::service::start_generic_aura_async_backing_node::<Network>(
-					config,
-					polkadot_config,
-					collator_options,
-					id,
-					use_experimental_slot_based,
-					hwbench,
-				)
-				.await
-				.map(|r| r.0),
-		}
-		.map_err(Into::into),
-
-		Runtime::Penpal(_) => crate::service::start_rococo_parachain_node::<Network>(
-			config,
-			polkadot_config,
-			collator_options,
-			id,
-			use_experimental_slot_based,
-			hwbench,
-		)
-		.await
-		.map(|r| r.0)
-		.map_err(Into::into),
-
-		Runtime::Glutton => crate::service::start_basic_async_backing_node::<Network>(
-			config,
-			polkadot_config,
-			collator_options,
-			id,
-			use_experimental_slot_based,
-			hwbench,
-		)
-		.await
-		.map(|r| r.0)
-		.map_err(Into::into),
-
-		Runtime::People(people_runtime_type) => match people_runtime_type {
-			chain_spec::people::PeopleRuntimeType::Kusama |
-			chain_spec::people::PeopleRuntimeType::KusamaLocal |
-			chain_spec::people::PeopleRuntimeType::Polkadot |
-			chain_spec::people::PeopleRuntimeType::PolkadotLocal |
-			chain_spec::people::PeopleRuntimeType::Rococo |
-			chain_spec::people::PeopleRuntimeType::RococoLocal |
-			chain_spec::people::PeopleRuntimeType::RococoDevelopment |
-			chain_spec::people::PeopleRuntimeType::Westend |
-			chain_spec::people::PeopleRuntimeType::WestendLocal |
-			chain_spec::people::PeopleRuntimeType::WestendDevelopment =>
-				crate::service::start_generic_aura_async_backing_node::<Network>(
-					config,
-					polkadot_config,
-					collator_options,
-					id,
-					use_experimental_slot_based,
-					hwbench,
-				)
-				.await
-				.map(|r| r.0),
-		}
-		.map_err(Into::into),
-		Runtime::Omni(consensus) => match consensus {
-			// rococo actually uses aura import and consensus, unlike most system chains that use
-			// relay to aura.
-			Consensus::Aura => crate::service::start_rococo_parachain_node::<Network>(
-				config,
-				polkadot_config,
-				collator_options,
-				id,
-				use_experimental_slot_based,
-				hwbench,
-			)
-			.await
-			.map(|r| r.0)
-			.map_err(Into::into),
-			Consensus::Relay => crate::service::start_shell_node::<Network>(
-				config,
-				polkadot_config,
-				collator_options,
-				id,
-				hwbench,
-			)
-			.await
-			.map(|r| r.0)
-			.map_err(Into::into),
-		},
-	}
+		.map_err(Into::into)
 }
 
 impl DefaultConfigurationValues for RelayChainCli {
diff --git a/cumulus/polkadot-parachain/src/common/mod.rs b/cumulus/polkadot-parachain/src/common/mod.rs
index 5adbb4137cd3da4070dc7f271cf2ac1c826e395c..9f5febafe30427fbd549b79e81c83d2392a795d5 100644
--- a/cumulus/polkadot-parachain/src/common/mod.rs
+++ b/cumulus/polkadot-parachain/src/common/mod.rs
@@ -65,3 +65,8 @@ where
 {
 	type BoundedRuntimeApi = T::RuntimeApi;
 }
+
+/// Extra args that are passed when creating a new node spec.
+pub struct NodeExtraArgs {
+	pub use_slot_based_consensus: bool,
+}
diff --git a/cumulus/polkadot-parachain/src/rpc.rs b/cumulus/polkadot-parachain/src/rpc.rs
index 7437bb1f4b9372f2454cbe6d491d302056606571..283a73d931d769fbd7b521c6f8a4a7558fc48be0 100644
--- a/cumulus/polkadot-parachain/src/rpc.rs
+++ b/cumulus/polkadot-parachain/src/rpc.rs
@@ -18,91 +18,82 @@
 
 #![warn(missing_docs)]
 
-use std::sync::Arc;
-
+use crate::{
+	common::ConstructNodeRuntimeApi,
+	service::{ParachainBackend, ParachainClient},
+};
+use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
 use parachains_common::{AccountId, Balance, Block, Nonce};
-use sc_client_api::AuxStore;
-pub use sc_rpc::DenyUnsafe;
-use sc_transaction_pool_api::TransactionPool;
-use sp_api::ProvideRuntimeApi;
-use sp_block_builder::BlockBuilder;
-use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
+use sc_rpc::{
+	dev::{Dev, DevApiServer},
+	DenyUnsafe,
+};
+use std::{marker::PhantomData, sync::Arc};
+use substrate_frame_rpc_system::{System, SystemApiServer};
+use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer};
 
 /// A type representing all RPC extensions.
 pub type RpcExtension = jsonrpsee::RpcModule<()>;
 
-/// Full client dependencies
-pub struct FullDeps<C, P> {
-	/// The client instance to use.
-	pub client: Arc<C>,
-	/// Transaction pool instance.
-	pub pool: Arc<P>,
-	/// Whether to deny unsafe calls
-	pub deny_unsafe: DenyUnsafe,
+pub(crate) trait BuildRpcExtensions<Client, Backend, Pool> {
+	fn build_rpc_extensions(
+		deny_unsafe: DenyUnsafe,
+		client: Arc<Client>,
+		backend: Arc<Backend>,
+		pool: Arc<Pool>,
+	) -> sc_service::error::Result<RpcExtension>;
 }
 
-/// Instantiate all RPC extensions.
-pub fn create_full<C, P, B>(
-	deps: FullDeps<C, P>,
-	backend: Arc<B>,
-) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
+pub(crate) struct BuildEmptyRpcExtensions<RuntimeApi>(PhantomData<RuntimeApi>);
+
+impl<RuntimeApi>
+	BuildRpcExtensions<
+		ParachainClient<RuntimeApi>,
+		ParachainBackend,
+		sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>,
+	> for BuildEmptyRpcExtensions<RuntimeApi>
 where
-	C: ProvideRuntimeApi<Block>
-		+ HeaderBackend<Block>
-		+ AuxStore
-		+ HeaderMetadata<Block, Error = BlockChainError>
-		+ Send
-		+ Sync
-		+ 'static,
-	C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
-	C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
-	C::Api: BlockBuilder<Block>,
-	P: TransactionPool + Sync + Send + 'static,
-	B: sc_client_api::Backend<Block> + Send + Sync + 'static,
-	B::State: sc_client_api::backend::StateBackend<sp_runtime::traits::HashingFor<Block>>,
+	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
 {
-	use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
-	use substrate_frame_rpc_system::{System, SystemApiServer};
-	use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer};
-
-	let mut module = RpcExtension::new(());
-	let FullDeps { client, pool, deny_unsafe } = deps;
-
-	module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
-	module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
-	module.merge(StateMigration::new(client, backend, deny_unsafe).into_rpc())?;
-
-	Ok(module)
+	fn build_rpc_extensions(
+		_deny_unsafe: DenyUnsafe,
+		_client: Arc<ParachainClient<RuntimeApi>>,
+		_backend: Arc<ParachainBackend>,
+		_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+	) -> sc_service::error::Result<RpcExtension> {
+		Ok(RpcExtension::new(()))
+	}
 }
 
-/// Instantiate all RPCs we want at the contracts-rococo chain.
-pub fn create_contracts_rococo<C, P>(
-	deps: FullDeps<C, P>,
-) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
+pub(crate) struct BuildParachainRpcExtensions<RuntimeApi>(PhantomData<RuntimeApi>);
+
+impl<RuntimeApi>
+	BuildRpcExtensions<
+		ParachainClient<RuntimeApi>,
+		ParachainBackend,
+		sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>,
+	> for BuildParachainRpcExtensions<RuntimeApi>
 where
-	C: ProvideRuntimeApi<Block>
-		+ sc_client_api::BlockBackend<Block>
-		+ HeaderBackend<Block>
-		+ AuxStore
-		+ HeaderMetadata<Block, Error = BlockChainError>
-		+ Send
-		+ Sync
-		+ 'static,
-	C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
-	C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
-	C::Api: BlockBuilder<Block>,
-	P: TransactionPool + Sync + Send + 'static,
+	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
+	RuntimeApi::RuntimeApi: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
+		+ substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
 {
-	use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
-	use sc_rpc::dev::{Dev, DevApiServer};
-	use substrate_frame_rpc_system::{System, SystemApiServer};
-
-	let mut module = RpcExtension::new(());
-	let FullDeps { client, pool, deny_unsafe } = deps;
-
-	module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
-	module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
-	module.merge(Dev::new(client, deny_unsafe).into_rpc())?;
-
-	Ok(module)
+	fn build_rpc_extensions(
+		deny_unsafe: DenyUnsafe,
+		client: Arc<ParachainClient<RuntimeApi>>,
+		backend: Arc<ParachainBackend>,
+		pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+	) -> sc_service::error::Result<RpcExtension> {
+		let build = || -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>> {
+			let mut module = RpcExtension::new(());
+
+			module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
+			module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
+			module.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?;
+			module.merge(Dev::new(client, deny_unsafe).into_rpc())?;
+
+			Ok(module)
+		};
+		build().map_err(Into::into)
+	}
 }
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index 0f2aed8ee4d85769a5d4e8818785bef01fd03cec..f5f6189d1f0d6b399f584df77bf70bd3961781df 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Cumulus.  If not, see <http://www.gnu.org/licenses/>.
 
-use cumulus_client_cli::CollatorOptions;
+use cumulus_client_cli::{CollatorOptions, ExportGenesisHeadCommand};
 use cumulus_client_collator::service::CollatorService;
 use cumulus_client_consensus_aura::collators::{
 	lookahead::{self as aura, Params as AuraParams},
@@ -22,6 +22,7 @@ use cumulus_client_consensus_aura::collators::{
 };
 use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
 use cumulus_client_consensus_proposer::Proposer;
+use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
 #[allow(deprecated)]
 use cumulus_client_service::old_consensus;
 use cumulus_client_service::{
@@ -30,39 +31,40 @@ use cumulus_client_service::{
 };
 use cumulus_primitives_core::{relay_chain::ValidationCode, ParaId};
 use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
-use sc_rpc::DenyUnsafe;
-
-use jsonrpsee::RpcModule;
 
 use crate::{
 	common::{
 		aura::{AuraIdT, AuraRuntimeApi},
-		ConstructNodeRuntimeApi,
+		ConstructNodeRuntimeApi, NodeExtraArgs,
 	},
 	fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi,
-	rpc,
+	rpc::BuildRpcExtensions,
 };
-pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Nonce};
+pub use parachains_common::{AccountId, Balance, Block, Hash, Nonce};
 
-use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
+use crate::rpc::{BuildEmptyRpcExtensions, BuildParachainRpcExtensions};
+use frame_benchmarking_cli::BlockCmd;
+#[cfg(feature = "runtime-benchmarks")]
+use frame_benchmarking_cli::StorageCmd;
 use futures::prelude::*;
+use polkadot_primitives::CollatorPair;
 use prometheus_endpoint::Registry;
+use sc_cli::{CheckBlockCmd, ExportBlocksCmd, ExportStateCmd, ImportBlocksCmd, RevertCmd};
 use sc_client_api::BlockchainEvents;
 use sc_consensus::{
 	import_queue::{BasicQueue, Verifier as VerifierT},
-	BlockImportParams, ImportQueue,
+	BlockImportParams, DefaultImportQueue, ImportQueue,
 };
 use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
 use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBackend, NetworkBlock};
-use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
+use sc_service::{Configuration, Error, PartialComponents, TFullBackend, TFullClient, TaskManager};
+use sc_sysinfo::HwBench;
 use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
-use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi};
-use sp_consensus_aura::AuraApi;
+use sc_transaction_pool::FullPool;
+use sp_api::ProvideRuntimeApi;
 use sp_keystore::KeystorePtr;
 use sp_runtime::{app_crypto::AppCrypto, traits::Header as HeaderT};
-use std::{marker::PhantomData, sync::Arc, time::Duration};
-
-use polkadot_primitives::CollatorPair;
+use std::{marker::PhantomData, pin::Pin, sync::Arc, time::Duration};
 
 #[cfg(not(feature = "runtime-benchmarks"))]
 type HostFunctions = cumulus_client_service::ParachainHostFunctions;
@@ -73,9 +75,9 @@ type HostFunctions = (
 	frame_benchmarking::benchmarking::HostFunctions,
 );
 
-type ParachainClient<RuntimeApi> = TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
+pub type ParachainClient<RuntimeApi> = TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
 
-type ParachainBackend = TFullBackend<Block>;
+pub type ParachainBackend = TFullBackend<Block>;
 
 type ParachainBlockImport<RuntimeApi> =
 	TParachainBlockImport<Block, Arc<ParachainClient<RuntimeApi>>, ParachainBackend>;
@@ -90,413 +92,312 @@ pub type Service<RuntimeApi> = PartialComponents<
 	(ParachainBlockImport<RuntimeApi>, Option<Telemetry>, Option<TelemetryWorkerHandle>),
 >;
 
-/// Starts a `ServiceBuilder` for a full service.
-///
-/// Use this macro if you don't actually need the full service, but just the builder in order to
-/// be able to perform chain operations.
-pub fn new_partial<RuntimeApi, BIQ>(
-	config: &Configuration,
-	build_import_queue: BIQ,
-) -> Result<Service<RuntimeApi>, sc_service::Error>
-where
-	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
-	BIQ: FnOnce(
-		Arc<ParachainClient<RuntimeApi>>,
-		ParachainBlockImport<RuntimeApi>,
-		&Configuration,
-		Option<TelemetryHandle>,
-		&TaskManager,
-	) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error>,
-{
-	let telemetry = config
-		.telemetry_endpoints
-		.clone()
-		.filter(|x| !x.is_empty())
-		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
-			let worker = TelemetryWorker::new(16)?;
-			let telemetry = worker.handle().new_telemetry(endpoints);
-			Ok((worker, telemetry))
-		})
-		.transpose()?;
-
-	let heap_pages = config
-		.default_heap_pages
-		.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ });
-
-	let executor = sc_executor::WasmExecutor::<HostFunctions>::builder()
-		.with_execution_method(config.wasm_method)
-		.with_max_runtime_instances(config.max_runtime_instances)
-		.with_runtime_cache_size(config.runtime_cache_size)
-		.with_onchain_heap_alloc_strategy(heap_pages)
-		.with_offchain_heap_alloc_strategy(heap_pages)
-		.build();
-
-	let (client, backend, keystore_container, task_manager) =
-		sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>(
-			config,
-			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-			executor,
-			true,
-		)?;
-	let client = Arc::new(client);
-
-	let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
-	let telemetry = telemetry.map(|(worker, telemetry)| {
-		task_manager.spawn_handle().spawn("telemetry", None, worker.run());
-		telemetry
-	});
-
-	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
-		config.transaction_pool.clone(),
-		config.role.is_authority().into(),
-		config.prometheus_registry(),
-		task_manager.spawn_essential_handle(),
-		client.clone(),
-	);
-
-	let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
-	let import_queue = build_import_queue(
-		client.clone(),
-		block_import.clone(),
-		config,
-		telemetry.as_ref().map(|telemetry| telemetry.handle()),
-		&task_manager,
-	)?;
-
-	Ok(PartialComponents {
-		backend,
-		client,
-		import_queue,
-		keystore_container,
-		task_manager,
-		transaction_pool,
-		select_chain: (),
-		other: (block_import, telemetry, telemetry_worker_handle),
-	})
+pub(crate) trait BuildImportQueue<RuntimeApi> {
+	fn build_import_queue(
+		client: Arc<ParachainClient<RuntimeApi>>,
+		block_import: ParachainBlockImport<RuntimeApi>,
+		config: &Configuration,
+		telemetry_handle: Option<TelemetryHandle>,
+		task_manager: &TaskManager,
+	) -> sc_service::error::Result<DefaultImportQueue<Block>>;
 }
 
-/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
-///
-/// This is the actual implementation that is abstract over the executor and the runtime api.
-#[sc_tracing::logging::prefix_logs_with("Parachain")]
-async fn start_node_impl<RuntimeApi, RB, BIQ, SC, Net>(
-	parachain_config: Configuration,
-	polkadot_config: Configuration,
-	collator_options: CollatorOptions,
-	sybil_resistance_level: CollatorSybilResistance,
-	para_id: ParaId,
-	rpc_ext_builder: RB,
-	build_import_queue: BIQ,
-	start_consensus: SC,
-	hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<RuntimeApi>>)>
+pub(crate) trait StartConsensus<RuntimeApi>
 where
 	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
-	RB: Fn(
-			DenyUnsafe,
-			Arc<ParachainClient<RuntimeApi>>,
-			Arc<ParachainBackend>,
-			Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
-		) -> Result<jsonrpsee::RpcModule<()>, sc_service::Error>
-		+ 'static,
-	BIQ: FnOnce(
-		Arc<ParachainClient<RuntimeApi>>,
-		ParachainBlockImport<RuntimeApi>,
-		&Configuration,
-		Option<TelemetryHandle>,
-		&TaskManager,
-	) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error>,
-	SC: FnOnce(
-		Arc<ParachainClient<RuntimeApi>>,
-		ParachainBlockImport<RuntimeApi>,
-		Option<&Registry>,
-		Option<TelemetryHandle>,
-		&TaskManager,
-		Arc<dyn RelayChainInterface>,
-		Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
-		KeystorePtr,
-		Duration,
-		ParaId,
-		CollatorPair,
-		OverseerHandle,
-		Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
-		Arc<ParachainBackend>,
-	) -> Result<(), sc_service::Error>,
-	Net: NetworkBackend<Block, Hash>,
 {
-	let parachain_config = prepare_node_config(parachain_config);
-
-	let params = new_partial::<RuntimeApi, BIQ>(&parachain_config, build_import_queue)?;
-	let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
-
-	let client = params.client.clone();
-	let backend = params.backend.clone();
-
-	let mut task_manager = params.task_manager;
-	let (relay_chain_interface, collator_key) = build_relay_chain_interface(
-		polkadot_config,
-		&parachain_config,
-		telemetry_worker_handle,
-		&mut task_manager,
-		collator_options.clone(),
-		hwbench.clone(),
-	)
-	.await
-	.map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
-
-	let validator = parachain_config.role.is_authority();
-	let prometheus_registry = parachain_config.prometheus_registry().cloned();
-	let transaction_pool = params.transaction_pool.clone();
-	let import_queue_service = params.import_queue.service();
-	let net_config = FullNetworkConfiguration::<_, _, Net>::new(&parachain_config.network);
-
-	let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
-		build_network(BuildNetworkParams {
-			parachain_config: &parachain_config,
-			net_config,
-			client: client.clone(),
-			transaction_pool: transaction_pool.clone(),
-			para_id,
-			spawn_handle: task_manager.spawn_handle(),
-			relay_chain_interface: relay_chain_interface.clone(),
-			import_queue: params.import_queue,
-			sybil_resistance_level,
-		})
-		.await?;
-
-	let rpc_builder = {
-		let client = client.clone();
-		let transaction_pool = transaction_pool.clone();
-		let backend_for_rpc = backend.clone();
-
-		Box::new(move |deny_unsafe, _| {
-			rpc_ext_builder(
-				deny_unsafe,
-				client.clone(),
-				backend_for_rpc.clone(),
-				transaction_pool.clone(),
-			)
-		})
-	};
-
-	sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-		rpc_builder,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		task_manager: &mut task_manager,
-		config: parachain_config,
-		keystore: params.keystore_container.keystore(),
-		backend: backend.clone(),
-		network: network.clone(),
-		sync_service: sync_service.clone(),
-		system_rpc_tx,
-		tx_handler_controller,
-		telemetry: telemetry.as_mut(),
-	})?;
-
-	if let Some(hwbench) = hwbench {
-		sc_sysinfo::print_hwbench(&hwbench);
-		if validator {
-			warn_if_slow_hardware(&hwbench);
-		}
-
-		if let Some(ref mut telemetry) = telemetry {
-			let telemetry_handle = telemetry.handle();
-			task_manager.spawn_handle().spawn(
-				"telemetry_hwbench",
-				None,
-				sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
-			);
-		}
-	}
-
-	let announce_block = {
-		let sync_service = sync_service.clone();
-		Arc::new(move |hash, data| sync_service.announce_block(hash, data))
-	};
+	fn start_consensus(
+		client: Arc<ParachainClient<RuntimeApi>>,
+		block_import: ParachainBlockImport<RuntimeApi>,
+		prometheus_registry: Option<&Registry>,
+		telemetry: Option<TelemetryHandle>,
+		task_manager: &TaskManager,
+		relay_chain_interface: Arc<dyn RelayChainInterface>,
+		transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+		keystore: KeystorePtr,
+		relay_chain_slot_duration: Duration,
+		para_id: ParaId,
+		collator_key: CollatorPair,
+		overseer_handle: OverseerHandle,
+		announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+		backend: Arc<ParachainBackend>,
+	) -> Result<(), sc_service::Error>;
+}
 
-	let relay_chain_slot_duration = Duration::from_secs(6);
+pub(crate) trait NodeSpec {
+	type RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<Self::RuntimeApi>>;
+
+	type BuildImportQueue: BuildImportQueue<Self::RuntimeApi> + 'static;
+
+	type BuildRpcExtensions: BuildRpcExtensions<
+			ParachainClient<Self::RuntimeApi>,
+			ParachainBackend,
+			sc_transaction_pool::FullPool<Block, ParachainClient<Self::RuntimeApi>>,
+		> + 'static;
+
+	type StartConsensus: StartConsensus<Self::RuntimeApi> + 'static;
+
+	const SYBIL_RESISTANCE: CollatorSybilResistance;
+
+	/// Starts a `ServiceBuilder` for a full service.
+	///
+	/// Use this method if you don't actually need the full service, but just the builder in order to
+	/// be able to perform chain operations.
+	fn new_partial(config: &Configuration) -> sc_service::error::Result<Service<Self::RuntimeApi>> {
+		let telemetry = config
+			.telemetry_endpoints
+			.clone()
+			.filter(|x| !x.is_empty())
+			.map(|endpoints| -> Result<_, sc_telemetry::Error> {
+				let worker = TelemetryWorker::new(16)?;
+				let telemetry = worker.handle().new_telemetry(endpoints);
+				Ok((worker, telemetry))
+			})
+			.transpose()?;
+
+		let heap_pages = config.default_heap_pages.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| {
+			HeapAllocStrategy::Static { extra_pages: h as _ }
+		});
+
+		let executor = sc_executor::WasmExecutor::<HostFunctions>::builder()
+			.with_execution_method(config.wasm_method)
+			.with_max_runtime_instances(config.max_runtime_instances)
+			.with_runtime_cache_size(config.runtime_cache_size)
+			.with_onchain_heap_alloc_strategy(heap_pages)
+			.with_offchain_heap_alloc_strategy(heap_pages)
+			.build();
+
+		let (client, backend, keystore_container, task_manager) =
+			sc_service::new_full_parts_record_import::<Block, Self::RuntimeApi, _>(
+				config,
+				telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+				executor,
+				true,
+			)?;
+		let client = Arc::new(client);
+
+		let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+		let telemetry = telemetry.map(|(worker, telemetry)| {
+			task_manager.spawn_handle().spawn("telemetry", None, worker.run());
+			telemetry
+		});
+
+		let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+			config.transaction_pool.clone(),
+			config.role.is_authority().into(),
+			config.prometheus_registry(),
+			task_manager.spawn_essential_handle(),
+			client.clone(),
+		);
 
-	let overseer_handle = relay_chain_interface
-		.overseer_handle()
-		.map_err(|e| sc_service::Error::Application(Box::new(e)))?;
+		let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
 
-	start_relay_chain_tasks(StartRelayChainTasksParams {
-		client: client.clone(),
-		announce_block: announce_block.clone(),
-		para_id,
-		relay_chain_interface: relay_chain_interface.clone(),
-		task_manager: &mut task_manager,
-		da_recovery_profile: if validator {
-			DARecoveryProfile::Collator
-		} else {
-			DARecoveryProfile::FullNode
-		},
-		import_queue: import_queue_service,
-		relay_chain_slot_duration,
-		recovery_handle: Box::new(overseer_handle.clone()),
-		sync_service: sync_service.clone(),
-	})?;
-
-	if validator {
-		start_consensus(
+		let import_queue = Self::BuildImportQueue::build_import_queue(
 			client.clone(),
-			block_import,
-			prometheus_registry.as_ref(),
-			telemetry.as_ref().map(|t| t.handle()),
+			block_import.clone(),
+			config,
+			telemetry.as_ref().map(|telemetry| telemetry.handle()),
 			&task_manager,
-			relay_chain_interface.clone(),
-			transaction_pool,
-			params.keystore_container.keystore(),
-			relay_chain_slot_duration,
-			para_id,
-			collator_key.expect("Command line arguments do not allow this. qed"),
-			overseer_handle,
-			announce_block,
-			backend.clone(),
 		)?;
+
+		Ok(PartialComponents {
+			backend,
+			client,
+			import_queue,
+			keystore_container,
+			task_manager,
+			transaction_pool,
+			select_chain: (),
+			other: (block_import, telemetry, telemetry_worker_handle),
+		})
 	}
 
-	start_network.start_network();
+	/// Start a node with the given parachain spec.
+	///
+	/// This is the actual implementation that is abstract over the executor and the runtime api.
+	fn start_node<Net>(
+		parachain_config: Configuration,
+		polkadot_config: Configuration,
+		collator_options: CollatorOptions,
+		para_id: ParaId,
+		hwbench: Option<sc_sysinfo::HwBench>,
+	) -> Pin<Box<dyn Future<Output = sc_service::error::Result<TaskManager>>>>
+	where
+		Net: NetworkBackend<Block, Hash>,
+	{
+		Box::pin(async move {
+			let parachain_config = prepare_node_config(parachain_config);
+
+			let params = Self::new_partial(&parachain_config)?;
+			let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
+
+			let client = params.client.clone();
+			let backend = params.backend.clone();
+
+			let mut task_manager = params.task_manager;
+			let (relay_chain_interface, collator_key) = build_relay_chain_interface(
+				polkadot_config,
+				&parachain_config,
+				telemetry_worker_handle,
+				&mut task_manager,
+				collator_options.clone(),
+				hwbench.clone(),
+			)
+			.await
+			.map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
+
+			let validator = parachain_config.role.is_authority();
+			let prometheus_registry = parachain_config.prometheus_registry().cloned();
+			let transaction_pool = params.transaction_pool.clone();
+			let import_queue_service = params.import_queue.service();
+			let net_config = FullNetworkConfiguration::<_, _, Net>::new(&parachain_config.network);
+
+			let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
+				build_network(BuildNetworkParams {
+					parachain_config: &parachain_config,
+					net_config,
+					client: client.clone(),
+					transaction_pool: transaction_pool.clone(),
+					para_id,
+					spawn_handle: task_manager.spawn_handle(),
+					relay_chain_interface: relay_chain_interface.clone(),
+					import_queue: params.import_queue,
+					sybil_resistance_level: Self::SYBIL_RESISTANCE,
+				})
+				.await?;
+
+			let rpc_builder = {
+				let client = client.clone();
+				let transaction_pool = transaction_pool.clone();
+				let backend_for_rpc = backend.clone();
+
+				Box::new(move |deny_unsafe, _| {
+					Self::BuildRpcExtensions::build_rpc_extensions(
+						deny_unsafe,
+						client.clone(),
+						backend_for_rpc.clone(),
+						transaction_pool.clone(),
+					)
+				})
+			};
+
+			sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+				rpc_builder,
+				client: client.clone(),
+				transaction_pool: transaction_pool.clone(),
+				task_manager: &mut task_manager,
+				config: parachain_config,
+				keystore: params.keystore_container.keystore(),
+				backend: backend.clone(),
+				network: network.clone(),
+				sync_service: sync_service.clone(),
+				system_rpc_tx,
+				tx_handler_controller,
+				telemetry: telemetry.as_mut(),
+			})?;
+
+			if let Some(hwbench) = hwbench {
+				sc_sysinfo::print_hwbench(&hwbench);
+				if validator {
+					warn_if_slow_hardware(&hwbench);
+				}
 
-	Ok((task_manager, client))
-}
+				if let Some(ref mut telemetry) = telemetry {
+					let telemetry_handle = telemetry.handle();
+					task_manager.spawn_handle().spawn(
+						"telemetry_hwbench",
+						None,
+						sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
+					);
+				}
+			}
 
-/// Build the import queue for Aura-based runtimes.
-pub fn build_aura_import_queue(
-	client: Arc<ParachainClient<FakeRuntimeApi>>,
-	block_import: ParachainBlockImport<FakeRuntimeApi>,
-	config: &Configuration,
-	telemetry: Option<TelemetryHandle>,
-	task_manager: &TaskManager,
-) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error> {
-	let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
-
-	cumulus_client_consensus_aura::import_queue::<
-		sp_consensus_aura::sr25519::AuthorityPair,
-		_,
-		_,
-		_,
-		_,
-		_,
-	>(cumulus_client_consensus_aura::ImportQueueParams {
-		block_import,
-		client,
-		create_inherent_data_providers: move |_, _| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-			let slot =
-				sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
-					*timestamp,
-					slot_duration,
-				);
-
-			Ok((slot, timestamp))
-		},
-		registry: config.prometheus_registry(),
-		spawner: &task_manager.spawn_essential_handle(),
-		telemetry,
-	})
-	.map_err(Into::into)
-}
+			let announce_block = {
+				let sync_service = sync_service.clone();
+				Arc::new(move |hash, data| sync_service.announce_block(hash, data))
+			};
+
+			let relay_chain_slot_duration = Duration::from_secs(6);
+
+			let overseer_handle = relay_chain_interface
+				.overseer_handle()
+				.map_err(|e| sc_service::Error::Application(Box::new(e)))?;
+
+			start_relay_chain_tasks(StartRelayChainTasksParams {
+				client: client.clone(),
+				announce_block: announce_block.clone(),
+				para_id,
+				relay_chain_interface: relay_chain_interface.clone(),
+				task_manager: &mut task_manager,
+				da_recovery_profile: if validator {
+					DARecoveryProfile::Collator
+				} else {
+					DARecoveryProfile::FullNode
+				},
+				import_queue: import_queue_service,
+				relay_chain_slot_duration,
+				recovery_handle: Box::new(overseer_handle.clone()),
+				sync_service,
+			})?;
+
+			if validator {
+				Self::StartConsensus::start_consensus(
+					client.clone(),
+					block_import,
+					prometheus_registry.as_ref(),
+					telemetry.as_ref().map(|t| t.handle()),
+					&task_manager,
+					relay_chain_interface.clone(),
+					transaction_pool,
+					params.keystore_container.keystore(),
+					relay_chain_slot_duration,
+					para_id,
+					collator_key.expect("Command line arguments do not allow this. qed"),
+					overseer_handle,
+					announce_block,
+					backend.clone(),
+				)?;
+			}
 
-/// Start a rococo parachain node.
-pub async fn start_rococo_parachain_node<Net: NetworkBackend<Block, Hash>>(
-	parachain_config: Configuration,
-	polkadot_config: Configuration,
-	collator_options: CollatorOptions,
-	para_id: ParaId,
-	use_experimental_slot_based: bool,
-	hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	let consensus_starter = if use_experimental_slot_based {
-		start_slot_based_aura_consensus::<_, AuraId>
-	} else {
-		start_lookahead_aura_consensus::<_, AuraId>
-	};
-	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
-		parachain_config,
-		polkadot_config,
-		collator_options,
-		CollatorSybilResistance::Resistant, // Aura
-		para_id,
-		build_parachain_rpc_extensions::<FakeRuntimeApi>,
-		build_aura_import_queue,
-		consensus_starter,
-		hwbench,
-	)
-	.await
-}
+			start_network.start_network();
 
-/// Build the import queue for the shell runtime.
-pub fn build_shell_import_queue(
-	client: Arc<ParachainClient<FakeRuntimeApi>>,
-	block_import: ParachainBlockImport<FakeRuntimeApi>,
-	config: &Configuration,
-	_: Option<TelemetryHandle>,
-	task_manager: &TaskManager,
-) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error> {
-	cumulus_client_consensus_relay_chain::import_queue(
-		client,
-		block_import,
-		|_, _| async { Ok(()) },
-		&task_manager.spawn_essential_handle(),
-		config.prometheus_registry(),
-	)
-	.map_err(Into::into)
+			Ok(task_manager)
+		})
+	}
 }
 
-fn build_parachain_rpc_extensions<RuntimeApi>(
-	deny_unsafe: sc_rpc::DenyUnsafe,
-	client: Arc<ParachainClient<RuntimeApi>>,
-	backend: Arc<ParachainBackend>,
-	pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
-) -> Result<jsonrpsee::RpcModule<()>, sc_service::Error>
-where
-	RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
-	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
-		+ sp_block_builder::BlockBuilder<Block>
-		+ pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
-		+ substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
-{
-	let deps = rpc::FullDeps { client, pool, deny_unsafe };
-
-	rpc::create_full(deps, backend).map_err(Into::into)
+/// Build the import queue for the shell runtime.
+pub(crate) struct BuildShellImportQueue<RuntimeApi>(PhantomData<RuntimeApi>);
+
+impl BuildImportQueue<FakeRuntimeApi> for BuildShellImportQueue<FakeRuntimeApi> {
+	fn build_import_queue(
+		client: Arc<ParachainClient<FakeRuntimeApi>>,
+		block_import: ParachainBlockImport<FakeRuntimeApi>,
+		config: &Configuration,
+		_telemetry_handle: Option<TelemetryHandle>,
+		task_manager: &TaskManager,
+	) -> sc_service::error::Result<DefaultImportQueue<Block>> {
+		cumulus_client_consensus_relay_chain::import_queue(
+			client,
+			block_import,
+			|_, _| async { Ok(()) },
+			&task_manager.spawn_essential_handle(),
+			config.prometheus_registry(),
+		)
+		.map_err(Into::into)
+	}
 }
 
-fn build_contracts_rpc_extensions(
-	deny_unsafe: sc_rpc::DenyUnsafe,
-	client: Arc<ParachainClient<FakeRuntimeApi>>,
-	_backend: Arc<ParachainBackend>,
-	pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<FakeRuntimeApi>>>,
-) -> Result<jsonrpsee::RpcModule<()>, sc_service::Error> {
-	let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe };
+pub(crate) struct ShellNode;
 
-	crate::rpc::create_contracts_rococo(deps).map_err(Into::into)
-}
+impl NodeSpec for ShellNode {
+	type RuntimeApi = FakeRuntimeApi;
+	type BuildImportQueue = BuildShellImportQueue<Self::RuntimeApi>;
+	type BuildRpcExtensions = BuildEmptyRpcExtensions<Self::RuntimeApi>;
+	type StartConsensus = StartRelayChainConsensus;
 
-/// Start a polkadot-shell parachain node.
-pub async fn start_shell_node<Net: NetworkBackend<Block, Hash>>(
-	parachain_config: Configuration,
-	polkadot_config: Configuration,
-	collator_options: CollatorOptions,
-	para_id: ParaId,
-	hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
-		parachain_config,
-		polkadot_config,
-		collator_options,
-		CollatorSybilResistance::Unresistant, // free-for-all consensus
-		para_id,
-		|_, _, _, _| Ok(RpcModule::new(())),
-		build_shell_import_queue,
-		start_relay_chain_consensus,
-		hwbench,
-	)
-	.await
+	const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Unresistant;
 }
 
 struct Verifier<Client, AuraId> {
@@ -527,435 +428,374 @@ where
 
 /// Build the import queue for parachain runtimes that started with relay chain consensus and
 /// switched to aura.
-pub fn build_relay_to_aura_import_queue<RuntimeApi, AuraId>(
-	client: Arc<ParachainClient<RuntimeApi>>,
-	block_import: ParachainBlockImport<RuntimeApi>,
-	config: &Configuration,
-	telemetry_handle: Option<TelemetryHandle>,
-	task_manager: &TaskManager,
-) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error>
+pub(crate) struct BuildRelayToAuraImportQueue<RuntimeApi, AuraId>(
+	PhantomData<(RuntimeApi, AuraId)>,
+);
+
+impl<RuntimeApi, AuraId> BuildImportQueue<RuntimeApi>
+	for BuildRelayToAuraImportQueue<RuntimeApi, AuraId>
 where
 	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
 	RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>,
 	AuraId: AuraIdT + Sync,
 {
-	let verifier_client = client.clone();
-
-	let aura_verifier = cumulus_client_consensus_aura::build_verifier::<
-		<AuraId as AppCrypto>::Pair,
-		_,
-		_,
-		_,
-	>(cumulus_client_consensus_aura::BuildVerifierParams {
-		client: verifier_client.clone(),
-		create_inherent_data_providers: move |parent_hash, _| {
-			let cidp_client = verifier_client.clone();
-			async move {
-				let slot_duration =
-					cumulus_client_consensus_aura::slot_duration_at(&*cidp_client, parent_hash)?;
-				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-				let slot =
-					sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
-						*timestamp,
-						slot_duration,
-					);
+	fn build_import_queue(
+		client: Arc<ParachainClient<RuntimeApi>>,
+		block_import: ParachainBlockImport<RuntimeApi>,
+		config: &Configuration,
+		telemetry_handle: Option<TelemetryHandle>,
+		task_manager: &TaskManager,
+	) -> sc_service::error::Result<DefaultImportQueue<Block>> {
+		let verifier_client = client.clone();
+
+		let aura_verifier =
+			cumulus_client_consensus_aura::build_verifier::<<AuraId as AppCrypto>::Pair, _, _, _>(
+				cumulus_client_consensus_aura::BuildVerifierParams {
+					client: verifier_client.clone(),
+					create_inherent_data_providers: move |parent_hash, _| {
+						let cidp_client = verifier_client.clone();
+						async move {
+							let slot_duration = cumulus_client_consensus_aura::slot_duration_at(
+								&*cidp_client,
+								parent_hash,
+							)?;
+							let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+
+							let slot =
+						sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
+							*timestamp,
+							slot_duration,
+						);
+
+							Ok((slot, timestamp))
+						}
+					},
+					telemetry: telemetry_handle,
+				},
+			);
 
-				Ok((slot, timestamp))
-			}
-		},
-		telemetry: telemetry_handle,
-	});
-
-	let relay_chain_verifier =
-		Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>;
-
-	let verifier = Verifier {
-		client,
-		relay_chain_verifier,
-		aura_verifier: Box::new(aura_verifier),
-		_phantom: PhantomData,
-	};
+		let relay_chain_verifier =
+			Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) }));
+
+		let verifier = Verifier {
+			client,
+			relay_chain_verifier,
+			aura_verifier: Box::new(aura_verifier),
+			_phantom: PhantomData,
+		};
 
-	let registry = config.prometheus_registry();
-	let spawner = task_manager.spawn_essential_handle();
+		let registry = config.prometheus_registry();
+		let spawner = task_manager.spawn_essential_handle();
 
-	Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry))
+		Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry))
+	}
 }
 
 /// Uses the lookahead collator to support async backing.
 ///
 /// Start an aura powered parachain node. Some system chains use this.
-pub async fn start_generic_aura_async_backing_node<Net: NetworkBackend<Block, Hash>>(
-	parachain_config: Configuration,
-	polkadot_config: Configuration,
-	collator_options: CollatorOptions,
-	para_id: ParaId,
-	use_experimental_slot_based: bool,
-	hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	let consensus_starter = if use_experimental_slot_based {
-		start_slot_based_aura_consensus::<_, AuraId>
-	} else {
-		start_lookahead_aura_consensus::<_, AuraId>
-	};
-	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
-		parachain_config,
-		polkadot_config,
-		collator_options,
-		CollatorSybilResistance::Resistant, // Aura
-		para_id,
-		build_parachain_rpc_extensions::<FakeRuntimeApi>,
-		build_relay_to_aura_import_queue::<_, AuraId>,
-		consensus_starter,
-		hwbench,
-	)
-	.await
+pub(crate) struct AuraNode<RuntimeApi, AuraId, StartConsensus>(
+	pub PhantomData<(RuntimeApi, AuraId, StartConsensus)>,
+);
+
+impl<RuntimeApi, AuraId, StartConsensus> Default for AuraNode<RuntimeApi, AuraId, StartConsensus> {
+	fn default() -> Self {
+		Self(Default::default())
+	}
 }
 
-/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub
-/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and
-/// needs to sync and upgrade before it can run `AuraApi` functions.
-///
-/// Uses the lookahead collator to support async backing.
-#[sc_tracing::logging::prefix_logs_with("Parachain")]
-pub async fn start_asset_hub_async_backing_node<RuntimeApi, AuraId, Net>(
-	parachain_config: Configuration,
-	polkadot_config: Configuration,
-	collator_options: CollatorOptions,
-	para_id: ParaId,
-	use_experimental_slot_based: bool,
-	hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<RuntimeApi>>)>
+impl<RuntimeApi, AuraId, StartConsensus> NodeSpec for AuraNode<RuntimeApi, AuraId, StartConsensus>
 where
 	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
 	RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>
 		+ pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
 		+ substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
 	AuraId: AuraIdT + Sync,
-	Net: NetworkBackend<Block, Hash>,
+	StartConsensus: self::StartConsensus<RuntimeApi> + 'static,
 {
-	let consensus_starter = if use_experimental_slot_based {
-		start_slot_based_aura_consensus::<_, AuraId>
-	} else {
-		start_lookahead_aura_consensus::<_, AuraId>
-	};
-
-	start_node_impl::<RuntimeApi, _, _, _, Net>(
-		parachain_config,
-		polkadot_config,
-		collator_options,
-		CollatorSybilResistance::Resistant, // Aura
-		para_id,
-		build_parachain_rpc_extensions,
-		build_relay_to_aura_import_queue::<_, AuraId>,
-		consensus_starter,
-		hwbench,
-	)
-	.await
+	type RuntimeApi = RuntimeApi;
+	type BuildImportQueue = BuildRelayToAuraImportQueue<RuntimeApi, AuraId>;
+	type BuildRpcExtensions = BuildParachainRpcExtensions<RuntimeApi>;
+	type StartConsensus = StartConsensus;
+	const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Resistant;
 }
 
-/// Wait for the Aura runtime API to appear on chain.
-/// This is useful for chains that started out without Aura. Components that
-/// are depending on Aura functionality will wait until Aura appears in the runtime.
-async fn wait_for_aura<RuntimeApi, AuraId>(client: Arc<ParachainClient<RuntimeApi>>)
+pub fn new_aura_node_spec<RuntimeApi, AuraId>(extra_args: NodeExtraArgs) -> Box<dyn DynNodeSpec>
 where
 	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
-	RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>,
+	RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>
+		+ pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
+		+ substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
 	AuraId: AuraIdT + Sync,
 {
-	let finalized_hash = client.chain_info().finalized_hash;
-	if client
-		.runtime_api()
-		.has_api::<dyn AuraApi<Block, AuraId>>(finalized_hash)
-		.unwrap_or(false)
-	{
-		return;
-	};
-
-	let mut stream = client.finality_notification_stream();
-	while let Some(notification) = stream.next().await {
-		let has_aura_api = client
-			.runtime_api()
-			.has_api::<dyn AuraApi<Block, AuraId>>(notification.hash)
-			.unwrap_or(false);
-		if has_aura_api {
-			return;
-		}
+	if extra_args.use_slot_based_consensus {
+		Box::new(AuraNode::<
+			RuntimeApi,
+			AuraId,
+			StartSlotBasedAuraConsensus<RuntimeApi, AuraId>,
+		>::default())
+	} else {
+		Box::new(AuraNode::<
+			RuntimeApi,
+			AuraId,
+			StartLookaheadAuraConsensus<RuntimeApi, AuraId>,
+		>::default())
 	}
 }
 
 /// Start relay-chain consensus that is free for all. Everyone can submit a block, the relay-chain
 /// decides what is backed and included.
-fn start_relay_chain_consensus(
-	client: Arc<ParachainClient<FakeRuntimeApi>>,
-	block_import: ParachainBlockImport<FakeRuntimeApi>,
-	prometheus_registry: Option<&Registry>,
-	telemetry: Option<TelemetryHandle>,
-	task_manager: &TaskManager,
-	relay_chain_interface: Arc<dyn RelayChainInterface>,
-	transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<FakeRuntimeApi>>>,
-	_keystore: KeystorePtr,
-	_relay_chain_slot_duration: Duration,
-	para_id: ParaId,
-	collator_key: CollatorPair,
-	overseer_handle: OverseerHandle,
-	announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
-	_backend: Arc<ParachainBackend>,
-) -> Result<(), sc_service::Error> {
-	let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
-		task_manager.spawn_handle(),
-		client.clone(),
-		transaction_pool,
-		prometheus_registry,
-		telemetry,
-	);
-
-	let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus(
-		cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams {
-			para_id,
-			proposer_factory,
-			block_import,
-			relay_chain_interface: relay_chain_interface.clone(),
-			create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
-				let relay_chain_interface = relay_chain_interface.clone();
-				async move {
-					let parachain_inherent =
+pub(crate) struct StartRelayChainConsensus;
+
+impl StartConsensus<FakeRuntimeApi> for StartRelayChainConsensus {
+	fn start_consensus(
+		client: Arc<ParachainClient<FakeRuntimeApi>>,
+		block_import: ParachainBlockImport<FakeRuntimeApi>,
+		prometheus_registry: Option<&Registry>,
+		telemetry: Option<TelemetryHandle>,
+		task_manager: &TaskManager,
+		relay_chain_interface: Arc<dyn RelayChainInterface>,
+		transaction_pool: Arc<FullPool<Block, ParachainClient<FakeRuntimeApi>>>,
+		_keystore: KeystorePtr,
+		_relay_chain_slot_duration: Duration,
+		para_id: ParaId,
+		collator_key: CollatorPair,
+		overseer_handle: OverseerHandle,
+		announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+		_backend: Arc<ParachainBackend>,
+	) -> Result<(), Error> {
+		let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
+			task_manager.spawn_handle(),
+			client.clone(),
+			transaction_pool,
+			prometheus_registry,
+			telemetry,
+		);
+
+		let free_for_all = cumulus_client_consensus_relay_chain::build_relay_chain_consensus(
+			cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams {
+				para_id,
+				proposer_factory,
+				block_import,
+				relay_chain_interface: relay_chain_interface.clone(),
+				create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
+					let relay_chain_interface = relay_chain_interface.clone();
+					async move {
+						let parachain_inherent =
 							cumulus_client_parachain_inherent::ParachainInherentDataProvider::create_at(
 								relay_parent,
 								&relay_chain_interface,
 								&validation_data,
 								para_id,
 							).await;
-					let parachain_inherent = parachain_inherent.ok_or_else(|| {
-						Box::<dyn std::error::Error + Send + Sync>::from(
-							"Failed to create parachain inherent",
-						)
-					})?;
-					Ok(parachain_inherent)
-				}
+						let parachain_inherent = parachain_inherent.ok_or_else(|| {
+							Box::<dyn std::error::Error + Send + Sync>::from(
+								"Failed to create parachain inherent",
+							)
+						})?;
+						Ok(parachain_inherent)
+					}
+				},
 			},
-		},
-	);
-
-	let spawner = task_manager.spawn_handle();
-
-	// Required for free-for-all consensus
-	#[allow(deprecated)]
-	old_consensus::start_collator_sync(old_consensus::StartCollatorParams {
-		para_id,
-		block_status: client.clone(),
-		announce_block,
-		overseer_handle,
-		spawner,
-		key: collator_key,
-		parachain_consensus: free_for_all,
-		runtime_api: client.clone(),
-	});
-
-	Ok(())
+		);
+
+		let spawner = task_manager.spawn_handle();
+
+		// Required for free-for-all consensus
+		#[allow(deprecated)]
+		old_consensus::start_collator_sync(old_consensus::StartCollatorParams {
+			para_id,
+			block_status: client.clone(),
+			announce_block,
+			overseer_handle,
+			spawner,
+			key: collator_key,
+			parachain_consensus: free_for_all,
+			runtime_api: client.clone(),
+		});
+
+		Ok(())
+	}
 }
 
 /// Start consensus using the lookahead aura collator.
-fn start_lookahead_aura_consensus<RuntimeApi, AuraId>(
-	client: Arc<ParachainClient<RuntimeApi>>,
-	block_import: ParachainBlockImport<RuntimeApi>,
-	prometheus_registry: Option<&Registry>,
-	telemetry: Option<TelemetryHandle>,
-	task_manager: &TaskManager,
-	relay_chain_interface: Arc<dyn RelayChainInterface>,
-	transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
-	keystore: KeystorePtr,
-	relay_chain_slot_duration: Duration,
-	para_id: ParaId,
-	collator_key: CollatorPair,
-	overseer_handle: OverseerHandle,
-	announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
-	backend: Arc<ParachainBackend>,
-) -> Result<(), sc_service::Error>
+pub(crate) struct StartSlotBasedAuraConsensus<RuntimeApi, AuraId>(
+	PhantomData<(RuntimeApi, AuraId)>,
+);
+
+impl<RuntimeApi, AuraId> StartConsensus<RuntimeApi>
+	for StartSlotBasedAuraConsensus<RuntimeApi, AuraId>
 where
 	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
 	RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>,
 	AuraId: AuraIdT + Sync,
 {
-	let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
-		task_manager.spawn_handle(),
-		client.clone(),
-		transaction_pool,
-		prometheus_registry,
-		telemetry.clone(),
-	);
-
-	let collator_service = CollatorService::new(
-		client.clone(),
-		Arc::new(task_manager.spawn_handle()),
-		announce_block,
-		client.clone(),
-	);
-
-	let params = AuraParams {
-		create_inherent_data_providers: move |_, ()| async move { Ok(()) },
-		block_import,
-		para_client: client.clone(),
-		para_backend: backend,
-		relay_client: relay_chain_interface,
-		code_hash_provider: {
-			let client = client.clone();
-			move |block_hash| {
-				client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
-			}
-		},
-		keystore,
-		collator_key,
-		para_id,
-		overseer_handle,
-		relay_chain_slot_duration,
-		proposer: Proposer::new(proposer_factory),
-		collator_service,
-		authoring_duration: Duration::from_millis(1500),
-		reinitialize: false,
-	};
+	fn start_consensus(
+		client: Arc<ParachainClient<RuntimeApi>>,
+		block_import: ParachainBlockImport<RuntimeApi>,
+		prometheus_registry: Option<&Registry>,
+		telemetry: Option<TelemetryHandle>,
+		task_manager: &TaskManager,
+		relay_chain_interface: Arc<dyn RelayChainInterface>,
+		transaction_pool: Arc<FullPool<Block, ParachainClient<RuntimeApi>>>,
+		keystore: KeystorePtr,
+		relay_chain_slot_duration: Duration,
+		para_id: ParaId,
+		collator_key: CollatorPair,
+		_overseer_handle: OverseerHandle,
+		announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+		backend: Arc<ParachainBackend>,
+	) -> Result<(), Error> {
+		let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
+			task_manager.spawn_handle(),
+			client.clone(),
+			transaction_pool,
+			prometheus_registry,
+			telemetry.clone(),
+		);
 
-	let fut = async move {
-		wait_for_aura(client).await;
-		aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _>(params).await;
-	};
-	task_manager.spawn_essential_handle().spawn("aura", None, fut);
+		let proposer = Proposer::new(proposer_factory);
+		let collator_service = CollatorService::new(
+			client.clone(),
+			Arc::new(task_manager.spawn_handle()),
+			announce_block,
+			client.clone(),
+		);
 
-	Ok(())
+		let client_for_aura = client.clone();
+		let params = SlotBasedParams {
+			create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+			block_import,
+			para_client: client.clone(),
+			para_backend: backend.clone(),
+			relay_client: relay_chain_interface,
+			code_hash_provider: move |block_hash| {
+				client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
+			},
+			keystore,
+			collator_key,
+			para_id,
+			relay_chain_slot_duration,
+			proposer,
+			collator_service,
+			authoring_duration: Duration::from_millis(2000),
+			reinitialize: false,
+			slot_drift: Duration::from_secs(1),
+		};
+
+		let (collation_future, block_builder_future) =
+			slot_based::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _>(params);
+
+		task_manager.spawn_essential_handle().spawn(
+			"collation-task",
+			Some("parachain-block-authoring"),
+			collation_future,
+		);
+		task_manager.spawn_essential_handle().spawn(
+			"block-builder-task",
+			Some("parachain-block-authoring"),
+			block_builder_future,
+		);
+		Ok(())
+	}
 }
 
-/// Start consensus using the lookahead aura collator.
-fn start_slot_based_aura_consensus<RuntimeApi, AuraId>(
-	client: Arc<ParachainClient<RuntimeApi>>,
-	block_import: ParachainBlockImport<RuntimeApi>,
-	prometheus_registry: Option<&Registry>,
-	telemetry: Option<TelemetryHandle>,
-	task_manager: &TaskManager,
-	relay_chain_interface: Arc<dyn RelayChainInterface>,
-	transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
-	keystore: KeystorePtr,
-	relay_chain_slot_duration: Duration,
-	para_id: ParaId,
-	collator_key: CollatorPair,
-	_overseer_handle: OverseerHandle,
-	announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
-	backend: Arc<ParachainBackend>,
-) -> Result<(), sc_service::Error>
+/// Wait for the Aura runtime API to appear on chain.
+/// This is useful for chains that started out without Aura. Components that
+/// are depending on Aura functionality will wait until Aura appears in the runtime.
+async fn wait_for_aura<RuntimeApi, AuraId>(client: Arc<ParachainClient<RuntimeApi>>)
 where
 	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
 	RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>,
 	AuraId: AuraIdT + Sync,
 {
-	let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
-		task_manager.spawn_handle(),
-		client.clone(),
-		transaction_pool,
-		prometheus_registry,
-		telemetry.clone(),
-	);
-
-	let proposer = Proposer::new(proposer_factory);
-	let collator_service = CollatorService::new(
-		client.clone(),
-		Arc::new(task_manager.spawn_handle()),
-		announce_block,
-		client.clone(),
-	);
-
-	let client_for_aura = client.clone();
-	let params = SlotBasedParams {
-		create_inherent_data_providers: move |_, ()| async move { Ok(()) },
-		block_import,
-		para_client: client.clone(),
-		para_backend: backend.clone(),
-		relay_client: relay_chain_interface,
-		code_hash_provider: move |block_hash| {
-			client_for_aura.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
-		},
-		keystore,
-		collator_key,
-		para_id,
-		relay_chain_slot_duration,
-		proposer,
-		collator_service,
-		authoring_duration: Duration::from_millis(2000),
-		reinitialize: false,
-		slot_drift: Duration::from_secs(1),
+	let finalized_hash = client.chain_info().finalized_hash;
+	if client.runtime_api().has_aura_api(finalized_hash) {
+		return;
 	};
 
-	let (collation_future, block_builder_future) =
-		slot_based::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _>(params);
-
-	task_manager.spawn_essential_handle().spawn(
-		"collation-task",
-		Some("parachain-block-authoring"),
-		collation_future,
-	);
-	task_manager.spawn_essential_handle().spawn(
-		"block-builder-task",
-		Some("parachain-block-authoring"),
-		block_builder_future,
-	);
-	Ok(())
+	let mut stream = client.finality_notification_stream();
+	while let Some(notification) = stream.next().await {
+		if client.runtime_api().has_aura_api(notification.hash) {
+			return;
+		}
+	}
 }
 
-/// Start an aura powered parachain node which uses the lookahead collator to support async backing.
-/// This node is basic in the sense that its runtime api doesn't include common contents such as
-/// transaction payment. Used for aura glutton.
-pub async fn start_basic_async_backing_node<Net: NetworkBackend<Block, Hash>>(
-	parachain_config: Configuration,
-	polkadot_config: Configuration,
-	collator_options: CollatorOptions,
-	para_id: ParaId,
-	use_experimental_slot_based: bool,
-	hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	let consensus_starter = if use_experimental_slot_based {
-		start_slot_based_aura_consensus::<_, AuraId>
-	} else {
-		start_lookahead_aura_consensus::<_, AuraId>
-	};
-	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
-		parachain_config,
-		polkadot_config,
-		collator_options,
-		CollatorSybilResistance::Resistant, // Aura
-		para_id,
-		|_, _, _, _| Ok(RpcModule::new(())),
-		build_relay_to_aura_import_queue::<_, AuraId>,
-		consensus_starter,
-		hwbench,
-	)
-	.await
-}
+/// Start consensus using the lookahead aura collator.
+pub(crate) struct StartLookaheadAuraConsensus<RuntimeApi, AuraId>(
+	PhantomData<(RuntimeApi, AuraId)>,
+);
 
-/// Start a parachain node for Rococo Contracts.
-pub async fn start_contracts_rococo_node<Net: NetworkBackend<Block, Hash>>(
-	parachain_config: Configuration,
-	polkadot_config: Configuration,
-	collator_options: CollatorOptions,
-	para_id: ParaId,
-	use_experimental_slot_based: bool,
-	hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
-	let consensus_starter = if use_experimental_slot_based {
-		start_slot_based_aura_consensus::<_, AuraId>
-	} else {
-		start_lookahead_aura_consensus::<_, AuraId>
-	};
-	start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
-		parachain_config,
-		polkadot_config,
-		collator_options,
-		CollatorSybilResistance::Resistant, // Aura
-		para_id,
-		build_contracts_rpc_extensions,
-		build_aura_import_queue,
-		consensus_starter,
-		hwbench,
-	)
-	.await
+impl<RuntimeApi, AuraId> StartConsensus<RuntimeApi>
+	for StartLookaheadAuraConsensus<RuntimeApi, AuraId>
+where
+	RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
+	RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>,
+	AuraId: AuraIdT + Sync,
+{
+	fn start_consensus(
+		client: Arc<ParachainClient<RuntimeApi>>,
+		block_import: ParachainBlockImport<RuntimeApi>,
+		prometheus_registry: Option<&Registry>,
+		telemetry: Option<TelemetryHandle>,
+		task_manager: &TaskManager,
+		relay_chain_interface: Arc<dyn RelayChainInterface>,
+		transaction_pool: Arc<FullPool<Block, ParachainClient<RuntimeApi>>>,
+		keystore: KeystorePtr,
+		relay_chain_slot_duration: Duration,
+		para_id: ParaId,
+		collator_key: CollatorPair,
+		overseer_handle: OverseerHandle,
+		announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+		backend: Arc<ParachainBackend>,
+	) -> Result<(), Error> {
+		let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
+			task_manager.spawn_handle(),
+			client.clone(),
+			transaction_pool,
+			prometheus_registry,
+			telemetry.clone(),
+		);
+
+		let collator_service = CollatorService::new(
+			client.clone(),
+			Arc::new(task_manager.spawn_handle()),
+			announce_block,
+			client.clone(),
+		);
+
+		let params = AuraParams {
+			create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+			block_import,
+			para_client: client.clone(),
+			para_backend: backend,
+			relay_client: relay_chain_interface,
+			code_hash_provider: {
+				let client = client.clone();
+				move |block_hash| {
+					client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
+				}
+			},
+			keystore,
+			collator_key,
+			para_id,
+			overseer_handle,
+			relay_chain_slot_duration,
+			proposer: Proposer::new(proposer_factory),
+			collator_service,
+			authoring_duration: Duration::from_millis(1500),
+			reinitialize: false,
+		};
+
+		let fut = async move {
+			wait_for_aura(client).await;
+			aura::run::<Block, <AuraId as AppCrypto>::Pair, _, _, _, _, _, _, _, _>(params).await;
+		};
+		task_manager.spawn_essential_handle().spawn("aura", None, fut);
+
+		Ok(())
+	}
 }
 
 /// Checks that the hardware meets the requirements and print a warning otherwise.
@@ -970,3 +810,177 @@ fn warn_if_slow_hardware(hwbench: &sc_sysinfo::HwBench) {
 		);
 	}
 }
+
+type SyncCmdResult = sc_cli::Result<()>;
+
+type AsyncCmdResult<'a> =
+	sc_cli::Result<(Pin<Box<dyn Future<Output = SyncCmdResult> + 'a>>, TaskManager)>;
+
+pub(crate) trait DynNodeSpec {
+	fn prepare_check_block_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &CheckBlockCmd,
+	) -> AsyncCmdResult<'_>;
+
+	fn prepare_export_blocks_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &ExportBlocksCmd,
+	) -> AsyncCmdResult<'_>;
+
+	fn prepare_export_state_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &ExportStateCmd,
+	) -> AsyncCmdResult<'_>;
+
+	fn prepare_import_blocks_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &ImportBlocksCmd,
+	) -> AsyncCmdResult<'_>;
+
+	fn prepare_revert_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &RevertCmd,
+	) -> AsyncCmdResult<'_>;
+
+	fn run_export_genesis_head_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &ExportGenesisHeadCommand,
+	) -> SyncCmdResult;
+
+	fn run_benchmark_block_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &BlockCmd,
+	) -> SyncCmdResult;
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn run_benchmark_storage_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &StorageCmd,
+	) -> SyncCmdResult;
+
+	fn start_node(
+		self: Box<Self>,
+		parachain_config: Configuration,
+		polkadot_config: Configuration,
+		collator_options: CollatorOptions,
+		para_id: ParaId,
+		hwbench: Option<sc_sysinfo::HwBench>,
+	) -> Pin<Box<dyn Future<Output = sc_service::error::Result<TaskManager>>>>;
+}
+
+impl<T> DynNodeSpec for T
+where
+	T: NodeSpec,
+{
+	fn prepare_check_block_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &CheckBlockCmd,
+	) -> AsyncCmdResult<'_> {
+		let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?;
+		Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager))
+	}
+
+	fn prepare_export_blocks_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &ExportBlocksCmd,
+	) -> AsyncCmdResult<'_> {
+		let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?;
+		Ok((Box::pin(cmd.run(partial.client, config.database)), partial.task_manager))
+	}
+
+	fn prepare_export_state_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &ExportStateCmd,
+	) -> AsyncCmdResult<'_> {
+		let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?;
+		Ok((Box::pin(cmd.run(partial.client, config.chain_spec)), partial.task_manager))
+	}
+
+	fn prepare_import_blocks_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &ImportBlocksCmd,
+	) -> AsyncCmdResult<'_> {
+		let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?;
+		Ok((Box::pin(cmd.run(partial.client, partial.import_queue)), partial.task_manager))
+	}
+
+	fn prepare_revert_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &RevertCmd,
+	) -> AsyncCmdResult<'_> {
+		let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?;
+		Ok((Box::pin(cmd.run(partial.client, partial.backend, None)), partial.task_manager))
+	}
+
+	fn run_export_genesis_head_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &ExportGenesisHeadCommand,
+	) -> SyncCmdResult {
+		let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?;
+		cmd.run(partial.client)
+	}
+
+	fn run_benchmark_block_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &BlockCmd,
+	) -> SyncCmdResult {
+		let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?;
+		cmd.run(partial.client)
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	fn run_benchmark_storage_cmd(
+		self: Box<Self>,
+		config: Configuration,
+		cmd: &StorageCmd,
+	) -> SyncCmdResult {
+		let partial = Self::new_partial(&config).map_err(sc_cli::Error::Service)?;
+		let db = partial.backend.expose_db();
+		let storage = partial.backend.expose_storage();
+
+		cmd.run(config, partial.client, db, storage)
+	}
+
+	fn start_node(
+		self: Box<Self>,
+		parachain_config: Configuration,
+		polkadot_config: Configuration,
+		collator_options: CollatorOptions,
+		para_id: ParaId,
+		hwbench: Option<HwBench>,
+	) -> Pin<Box<dyn Future<Output = sc_service::error::Result<TaskManager>>>> {
+		match parachain_config.network.network_backend {
+			sc_network::config::NetworkBackendType::Libp2p =>
+				<Self as NodeSpec>::start_node::<sc_network::NetworkWorker<_, _>>(
+					parachain_config,
+					polkadot_config,
+					collator_options,
+					para_id,
+					hwbench,
+				),
+			sc_network::config::NetworkBackendType::Litep2p =>
+				<Self as NodeSpec>::start_node::<sc_network::Litep2pNetworkBackend>(
+					parachain_config,
+					polkadot_config,
+					collator_options,
+					para_id,
+					hwbench,
+				),
+		}
+	}
+}
diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs
index b89054b4dc32178b0c42b3f365392e5202b88bc5..62d99122c3012701aa46039b13c3c05d3331d8c3 100644
--- a/polkadot/cli/src/command.rs
+++ b/polkadot/cli/src/command.rs
@@ -192,7 +192,7 @@ where
 	F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration),
 {
 	let runner = cli
-		.create_runner_with_logger_hook::<sc_cli::RunCmd, F>(&cli.run.base, logger_hook)
+		.create_runner_with_logger_hook::<_, _, F>(&cli.run.base, logger_hook)
 		.map_err(Error::from)?;
 	let chain_spec = &runner.config().chain_spec;
 
diff --git a/substrate/client/cli/src/lib.rs b/substrate/client/cli/src/lib.rs
index 104e8ec8b798ee5b8eb6c9561c6ef0729d7a7b2a..1bb9fec0e27690f9b7ec21918f3dce329100de3a 100644
--- a/substrate/client/cli/src/lib.rs
+++ b/substrate/client/cli/src/lib.rs
@@ -58,11 +58,11 @@ pub trait SubstrateCli: Sized {
 
 	/// Implementation version.
 	///
-	/// By default this will look like this:
+	/// By default, it will look like this:
 	///
 	/// `2.0.0-b950f731c`
 	///
-	/// Where the hash is the short commit hash of the commit of in the Git repository.
+	/// Where the hash is the short hash of the commit in the Git repository.
 	fn impl_version() -> String;
 
 	/// Executable file name.
@@ -199,17 +199,8 @@ pub trait SubstrateCli: Sized {
 	fn create_runner<T: CliConfiguration<DVC>, DVC: DefaultConfigurationValues>(
 		&self,
 		command: &T,
-	) -> error::Result<Runner<Self>> {
-		let tokio_runtime = build_runtime()?;
-
-		// `capture` needs to be called in a tokio context.
-		// Also capture them as early as possible.
-		let signals = tokio_runtime.block_on(async { Signals::capture() })?;
-
-		let config = command.create_configuration(self, tokio_runtime.handle().clone())?;
-
-		command.init(&Self::support_url(), &Self::impl_version(), |_, _| {}, &config)?;
-		Runner::new(config, tokio_runtime, signals)
+	) -> Result<Runner<Self>> {
+		self.create_runner_with_logger_hook(command, |_, _| {})
 	}
 
 	/// Create a runner for the command provided in argument. The `logger_hook` can be used to setup
@@ -231,11 +222,15 @@ pub trait SubstrateCli: Sized {
 	/// 	}
 	/// }
 	/// ```
-	fn create_runner_with_logger_hook<T: CliConfiguration, F>(
+	fn create_runner_with_logger_hook<
+		T: CliConfiguration<DVC>,
+		DVC: DefaultConfigurationValues,
+		F,
+	>(
 		&self,
 		command: &T,
 		logger_hook: F,
-	) -> error::Result<Runner<Self>>
+	) -> Result<Runner<Self>>
 	where
 		F: FnOnce(&mut LoggerBuilder, &Configuration),
 	{
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index a51bb4012d5d8ac0a4fb39e73c5a872a33e5fa7d..63be296d1b2161eaa8d7f71ea660a9d5a2037ecf 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -137,7 +137,7 @@ pub struct PartialComponents<Client, Backend, SelectChain, ImportQueue, Transact
 	pub backend: Arc<Backend>,
 	/// The chain task manager.
 	pub task_manager: TaskManager,
-	/// A keystore container instance..
+	/// A keystore container instance.
 	pub keystore_container: KeystoreContainer,
 	/// A chain selection algorithm instance.
 	pub select_chain: SelectChain,