diff --git a/.gitignore b/.gitignore
index afa9ed33f4a0369b47c7a5c519965249cb3f6859..d4828765708579155865b6fe7615c19fa6d300cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,7 +30,6 @@ artifacts
 bin/node-template/Cargo.lock
 nohup.out
 polkadot_argument_parsing
-polkadot.*
 !docs/sdk/src/polkadot_sdk/polkadot.rs
 pwasm-alloc/Cargo.lock
 pwasm-libc/Cargo.lock
diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs
index 25b8ee10a931eff6a190586ec2959e4ad53a3de2..ae83f2ade3f6adbedf82bb881db309208fcafce0 100644
--- a/cumulus/client/service/src/lib.rs
+++ b/cumulus/client/service/src/lib.rs
@@ -40,10 +40,7 @@ use sc_consensus::{
 use sc_network::{config::SyncMode, service::traits::NetworkService, NetworkBackend};
 use sc_network_sync::SyncingService;
 use sc_network_transactions::TransactionsHandlerController;
-use sc_service::{
-	build_polkadot_syncing_strategy, Configuration, NetworkStarter, SpawnTaskHandle, TaskManager,
-	WarpSyncConfig,
-};
+use sc_service::{Configuration, NetworkStarter, SpawnTaskHandle, TaskManager, WarpSyncConfig};
 use sc_telemetry::{log, TelemetryWorkerHandle};
 use sc_utils::mpsc::TracingUnboundedSender;
 use sp_api::ProvideRuntimeApi;
@@ -429,7 +426,7 @@ pub struct BuildNetworkParams<
 pub async fn build_network<'a, Block, Client, RCInterface, IQ, Network>(
 	BuildNetworkParams {
 		parachain_config,
-		mut net_config,
+		net_config,
 		client,
 		transaction_pool,
 		para_id,
@@ -500,16 +497,6 @@ where
 		parachain_config.prometheus_config.as_ref().map(|config| &config.registry),
 	);
 
-	let syncing_strategy = build_polkadot_syncing_strategy(
-		parachain_config.protocol_id(),
-		parachain_config.chain_spec.fork_id(),
-		&mut net_config,
-		warp_sync_config,
-		client.clone(),
-		&spawn_handle,
-		parachain_config.prometheus_config.as_ref().map(|config| &config.registry),
-	)?;
-
 	sc_service::build_network(sc_service::BuildNetworkParams {
 		config: parachain_config,
 		net_config,
@@ -518,7 +505,7 @@ where
 		spawn_handle,
 		import_queue,
 		block_announce_validator_builder: Some(Box::new(move |_| block_announce_validator)),
-		syncing_strategy,
+		warp_sync_config,
 		block_relay: None,
 		metrics,
 	})
diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs
index e8043bd7b2aac6c8104262b4e1e5dfd3a76de577..b7fc3489da2549978f948f299b04ae87c441e16c 100644
--- a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs
+++ b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs
@@ -25,7 +25,7 @@ use cumulus_primitives_core::ParaId;
 use sc_consensus::{DefaultImportQueue, LongestChain};
 use sc_consensus_manual_seal::rpc::{ManualSeal, ManualSealApiServer};
 use sc_network::NetworkBackend;
-use sc_service::{build_polkadot_syncing_strategy, Configuration, PartialComponents, TaskManager};
+use sc_service::{Configuration, PartialComponents, TaskManager};
 use sc_telemetry::TelemetryHandle;
 use sp_runtime::traits::Header;
 use std::{marker::PhantomData, sync::Arc};
@@ -85,7 +85,7 @@ impl<NodeSpec: NodeSpecT> ManualSealNode<NodeSpec> {
 		// Since this is a dev node, prevent it from connecting to peers.
 		config.network.default_peers_set.in_peers = 0;
 		config.network.default_peers_set.out_peers = 0;
-		let mut net_config = sc_network::config::FullNetworkConfiguration::<_, _, Net>::new(
+		let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Net>::new(
 			&config.network,
 			config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()),
 		);
@@ -93,16 +93,6 @@ impl<NodeSpec: NodeSpecT> ManualSealNode<NodeSpec> {
 			config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
 		);
 
-		let syncing_strategy = build_polkadot_syncing_strategy(
-			config.protocol_id(),
-			config.chain_spec.fork_id(),
-			&mut net_config,
-			None,
-			client.clone(),
-			&task_manager.spawn_handle(),
-			config.prometheus_config.as_ref().map(|config| &config.registry),
-		)?;
-
 		let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
 			sc_service::build_network(sc_service::BuildNetworkParams {
 				config: &config,
@@ -112,7 +102,7 @@ impl<NodeSpec: NodeSpecT> ManualSealNode<NodeSpec> {
 				import_queue,
 				net_config,
 				block_announce_validator_builder: None,
-				syncing_strategy,
+				warp_sync_config: None,
 				block_relay: None,
 				metrics,
 			})?;
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index abba91a38a97c43bca584edb38efd00b6d4654d3..d2424474302a6314295f928f110a8e1487aeeab1 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -80,7 +80,7 @@ use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration};
 use prometheus_endpoint::Registry;
 #[cfg(feature = "full-node")]
 use sc_service::KeystoreContainer;
-use sc_service::{build_polkadot_syncing_strategy, RpcHandlers, SpawnTaskHandle};
+use sc_service::{RpcHandlers, SpawnTaskHandle};
 use sc_telemetry::TelemetryWorker;
 #[cfg(feature = "full-node")]
 use sc_telemetry::{Telemetry, TelemetryWorkerHandle};
@@ -1003,16 +1003,6 @@ pub fn new_full<
 		})
 	};
 
-	let syncing_strategy = build_polkadot_syncing_strategy(
-		config.protocol_id(),
-		config.chain_spec.fork_id(),
-		&mut net_config,
-		Some(WarpSyncConfig::WithProvider(warp_sync)),
-		client.clone(),
-		&task_manager.spawn_handle(),
-		config.prometheus_config.as_ref().map(|config| &config.registry),
-	)?;
-
 	let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
 			config: &config,
@@ -1022,7 +1012,7 @@ pub fn new_full<
 			spawn_handle: task_manager.spawn_handle(),
 			import_queue,
 			block_announce_validator_builder: None,
-			syncing_strategy,
+			warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)),
 			block_relay: None,
 			metrics,
 		})?;
diff --git a/prdoc/pr_5737.prdoc b/prdoc/pr_5737.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a122e4574a9cecba6160ede60055a4443182591b
--- /dev/null
+++ b/prdoc/pr_5737.prdoc
@@ -0,0 +1,25 @@
+title: Make syncing service an argument of `build_network`
+
+doc:
+  - audience: Node Dev
+    description: |
+      `build_network` is now accompanied by the lower-level `build_network_advanced`, which has a simpler API that
+      does not create the syncing engine internally and instead takes a handle to the syncing service as an argument.
+      In most cases a typical syncing engine with the Polkadot syncing strategy and the default block downloader can
+      be created with the newly introduced `sc_service::build_default_syncing_engine()` function, but the lower-level
+      `build_default_block_downloader` also exists for those needing more customization.
+
+      These changes give developers greater control than ever over the syncing implementation, while `build_network`
+      remains available for easier high-level usage.
+
+crates:
+  - name: cumulus-client-service
+    bump: patch
+  - name: polkadot-service
+    bump: patch
+  - name: sc-consensus
+    bump: major
+  - name: sc-service
+    bump: major
+  - name: sc-network-sync
+    bump: major
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index 057e0bbdcefc751c2d970633fcbc017d15e0c8ca..008cac4ef8a88b3b147abf98047e6d5ecbe0b0ba 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -32,7 +32,6 @@ use frame_system_rpc_runtime_api::AccountNonceApi;
 use futures::prelude::*;
 use kitchensink_runtime::RuntimeApi;
 use node_primitives::Block;
-use polkadot_sdk::sc_service::build_polkadot_syncing_strategy;
 use sc_client_api::{Backend, BlockBackend};
 use sc_consensus_babe::{self, SlotProportion};
 use sc_network::{
@@ -514,16 +513,6 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
 		Vec::default(),
 	));
 
-	let syncing_strategy = build_polkadot_syncing_strategy(
-		config.protocol_id(),
-		config.chain_spec.fork_id(),
-		&mut net_config,
-		Some(WarpSyncConfig::WithProvider(warp_sync)),
-		client.clone(),
-		&task_manager.spawn_handle(),
-		config.prometheus_config.as_ref().map(|config| &config.registry),
-	)?;
-
 	let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
 			config: &config,
@@ -533,7 +522,7 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
 			spawn_handle: task_manager.spawn_handle(),
 			import_queue,
 			block_announce_validator_builder: None,
-			syncing_strategy,
+			warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)),
 			block_relay: None,
 			metrics,
 		})?;
diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs
index 1baa67398a49c3461a2d4f85d9c23de3191e9c22..602683907d4824900f5131c99168a43c833aee3a 100644
--- a/substrate/client/consensus/common/src/import_queue.rs
+++ b/substrate/client/consensus/common/src/import_queue.rs
@@ -107,7 +107,7 @@ pub trait Verifier<B: BlockT>: Send + Sync {
 ///
 /// The `import_*` methods can be called in order to send elements for the import queue to verify.
 pub trait ImportQueueService<B: BlockT>: Send {
-	/// Import bunch of blocks, every next block must be an ancestor of the previous block in the
+	/// Import a bunch of blocks, every next block must be an ancestor of the previous block in the
 	/// list.
 	fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec<IncomingBlock<B>>);
 
@@ -132,21 +132,21 @@ pub trait ImportQueue<B: BlockT>: Send {
 	/// This method should behave in a way similar to `Future::poll`. It can register the current
 	/// task and notify later when more actions are ready to be polled. To continue the comparison,
 	/// it is as if this method always returned `Poll::Pending`.
-	fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link<B>);
+	fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &dyn Link<B>);
 
 	/// Start asynchronous runner for import queue.
 	///
 	/// Takes an object implementing [`Link`] which allows the import queue to
 	/// influence the synchronization process.
-	async fn run(self, link: Box<dyn Link<B>>);
+	async fn run(self, link: &dyn Link<B>);
 }
 
 /// Hooks that the verification queue can use to influence the synchronization
 /// algorithm.
-pub trait Link<B: BlockT>: Send {
+pub trait Link<B: BlockT>: Send + Sync {
 	/// Batch of blocks imported, with or without error.
 	fn blocks_processed(
-		&mut self,
+		&self,
 		_imported: usize,
 		_count: usize,
 		_results: Vec<(BlockImportResult<B>, B::Hash)>,
@@ -155,7 +155,7 @@ pub trait Link<B: BlockT>: Send {
 
 	/// Justification import result.
 	fn justification_imported(
-		&mut self,
+		&self,
 		_who: RuntimeOrigin,
 		_hash: &B::Hash,
 		_number: NumberFor<B>,
@@ -164,7 +164,7 @@ pub trait Link<B: BlockT>: Send {
 	}
 
 	/// Request a justification for the given block.
-	fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor<B>) {}
+	fn request_justification(&self, _hash: &B::Hash, _number: NumberFor<B>) {}
 }
 
 /// Block import successful result.
diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs
index 7b371145e2e7df871d8c959bdc04fd405451ff39..21270859dd75ccaf04abad6a5d2cf7c4ed6fe003 100644
--- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs
+++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs
@@ -177,7 +177,7 @@ impl<B: BlockT> ImportQueue<B> for BasicQueue<B> {
 	}
 
 	/// Poll actions from network.
-	fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link<B>) {
+	fn poll_actions(&mut self, cx: &mut Context, link: &dyn Link<B>) {
 		if self.result_port.poll_actions(cx, link).is_err() {
 			log::error!(
 				target: LOG_TARGET,
@@ -190,9 +190,9 @@ impl<B: BlockT> ImportQueue<B> for BasicQueue<B> {
 	///
 	/// Takes an object implementing [`Link`] which allows the import queue to
 	/// influence the synchronization process.
-	async fn run(mut self, mut link: Box<dyn Link<B>>) {
+	async fn run(mut self, link: &dyn Link<B>) {
 		loop {
-			if let Err(_) = self.result_port.next_action(&mut *link).await {
+			if let Err(_) = self.result_port.next_action(link).await {
 				log::error!(target: "sync", "poll_actions: Background import task is no longer alive");
 				return
 			}
@@ -223,7 +223,7 @@ mod worker_messages {
 async fn block_import_process<B: BlockT>(
 	mut block_import: BoxBlockImport<B>,
 	verifier: impl Verifier<B>,
-	mut result_sender: BufferedLinkSender<B>,
+	result_sender: BufferedLinkSender<B>,
 	mut block_import_receiver: TracingUnboundedReceiver<worker_messages::ImportBlocks<B>>,
 	metrics: Option<Metrics>,
 ) {
@@ -501,6 +501,7 @@ mod tests {
 		import_queue::Verifier,
 	};
 	use futures::{executor::block_on, Future};
+	use parking_lot::Mutex;
 	use sp_test_primitives::{Block, BlockNumber, Hash, Header};
 
 	#[async_trait::async_trait]
@@ -558,29 +559,29 @@ mod tests {
 
 	#[derive(Default)]
 	struct TestLink {
-		events: Vec<Event>,
+		events: Mutex<Vec<Event>>,
 	}
 
 	impl Link<Block> for TestLink {
 		fn blocks_processed(
-			&mut self,
+			&self,
 			_imported: usize,
 			_count: usize,
 			results: Vec<(Result<BlockImportStatus<BlockNumber>, BlockImportError>, Hash)>,
 		) {
 			if let Some(hash) = results.into_iter().find_map(|(r, h)| r.ok().map(|_| h)) {
-				self.events.push(Event::BlockImported(hash));
+				self.events.lock().push(Event::BlockImported(hash));
 			}
 		}
 
 		fn justification_imported(
-			&mut self,
+			&self,
 			_who: RuntimeOrigin,
 			hash: &Hash,
 			_number: BlockNumber,
 			_success: bool,
 		) {
-			self.events.push(Event::JustificationImported(*hash))
+			self.events.lock().push(Event::JustificationImported(*hash))
 		}
 	}
 
@@ -638,7 +639,7 @@ mod tests {
 			hash
 		};
 
-		let mut link = TestLink::default();
+		let link = TestLink::default();
 
 		// we send a bunch of tasks to the worker
 		let block1 = import_block(1);
@@ -653,13 +654,13 @@ mod tests {
 
 		// we poll the worker until we have processed 9 events
 		block_on(futures::future::poll_fn(|cx| {
-			while link.events.len() < 9 {
+			while link.events.lock().len() < 9 {
 				match Future::poll(Pin::new(&mut worker), cx) {
 					Poll::Pending => {},
 					Poll::Ready(()) => panic!("import queue worker should not conclude."),
 				}
 
-				result_port.poll_actions(cx, &mut link).unwrap();
+				result_port.poll_actions(cx, &link).unwrap();
 			}
 
 			Poll::Ready(())
@@ -667,8 +668,8 @@ mod tests {
 
 		// all justification tasks must be done before any block import work
 		assert_eq!(
-			link.events,
-			vec![
+			&*link.events.lock(),
+			&[
 				Event::JustificationImported(justification1),
 				Event::JustificationImported(justification2),
 				Event::JustificationImported(justification3),
diff --git a/substrate/client/consensus/common/src/import_queue/buffered_link.rs b/substrate/client/consensus/common/src/import_queue/buffered_link.rs
index c23a4b0d5d0abdcb2ec9291f44a8709c349fc048..67131b06a32e5e674f552c07245211fed53c3154 100644
--- a/substrate/client/consensus/common/src/import_queue/buffered_link.rs
+++ b/substrate/client/consensus/common/src/import_queue/buffered_link.rs
@@ -27,13 +27,13 @@
 //! # use sc_consensus::import_queue::buffered_link::buffered_link;
 //! # use sp_test_primitives::Block;
 //! # struct DummyLink; impl Link<Block> for DummyLink {}
-//! # let mut my_link = DummyLink;
+//! # let my_link = DummyLink;
 //! let (mut tx, mut rx) = buffered_link::<Block>(100_000);
 //! tx.blocks_processed(0, 0, vec![]);
 //!
 //! // Calls `my_link.blocks_processed(0, 0, vec![])` when polled.
 //! let _fut = futures::future::poll_fn(move |cx| {
-//! 	rx.poll_actions(cx, &mut my_link);
+//! 	rx.poll_actions(cx, &my_link).unwrap();
 //! 	std::task::Poll::Pending::<()>
 //! });
 //! ```
@@ -90,7 +90,7 @@ pub enum BlockImportWorkerMsg<B: BlockT> {
 
 impl<B: BlockT> Link<B> for BufferedLinkSender<B> {
 	fn blocks_processed(
-		&mut self,
+		&self,
 		imported: usize,
 		count: usize,
 		results: Vec<(BlockImportResult<B>, B::Hash)>,
@@ -101,7 +101,7 @@ impl<B: BlockT> Link<B> for BufferedLinkSender<B> {
 	}
 
 	fn justification_imported(
-		&mut self,
+		&self,
 		who: RuntimeOrigin,
 		hash: &B::Hash,
 		number: NumberFor<B>,
@@ -111,7 +111,7 @@ impl<B: BlockT> Link<B> for BufferedLinkSender<B> {
 		let _ = self.tx.unbounded_send(msg);
 	}
 
-	fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
+	fn request_justification(&self, hash: &B::Hash, number: NumberFor<B>) {
 		let _ = self
 			.tx
 			.unbounded_send(BlockImportWorkerMsg::RequestJustification(*hash, number));
@@ -125,7 +125,7 @@ pub struct BufferedLinkReceiver<B: BlockT> {
 
 impl<B: BlockT> BufferedLinkReceiver<B> {
 	/// Send action for the synchronization to perform.
-	pub fn send_actions(&mut self, msg: BlockImportWorkerMsg<B>, link: &mut dyn Link<B>) {
+	pub fn send_actions(&mut self, msg: BlockImportWorkerMsg<B>, link: &dyn Link<B>) {
 		match msg {
 			BlockImportWorkerMsg::BlocksProcessed(imported, count, results) =>
 				link.blocks_processed(imported, count, results),
@@ -144,7 +144,7 @@ impl<B: BlockT> BufferedLinkReceiver<B> {
 	/// it is as if this method always returned `Poll::Pending`.
 	///
 	/// Returns an error if the corresponding [`BufferedLinkSender`] has been closed.
-	pub fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link<B>) -> Result<(), ()> {
+	pub fn poll_actions(&mut self, cx: &mut Context, link: &dyn Link<B>) -> Result<(), ()> {
 		loop {
 			let msg = match Stream::poll_next(Pin::new(&mut self.rx), cx) {
 				Poll::Ready(Some(msg)) => msg,
@@ -152,12 +152,12 @@ impl<B: BlockT> BufferedLinkReceiver<B> {
 				Poll::Pending => break Ok(()),
 			};
 
-			self.send_actions(msg, &mut *link);
+			self.send_actions(msg, link);
 		}
 	}
 
 	/// Poll next element from import queue and send the corresponding action command over the link.
-	pub async fn next_action(&mut self, link: &mut dyn Link<B>) -> Result<(), ()> {
+	pub async fn next_action(&mut self, link: &dyn Link<B>) -> Result<(), ()> {
 		if let Some(msg) = self.rx.next().await {
 			self.send_actions(msg, link);
 			return Ok(())
diff --git a/substrate/client/consensus/common/src/import_queue/mock.rs b/substrate/client/consensus/common/src/import_queue/mock.rs
index 64ac532ded854194121815878b935aff0291e5cd..a238f72568ca655cf51b9c01e2f2a0d34ba1125c 100644
--- a/substrate/client/consensus/common/src/import_queue/mock.rs
+++ b/substrate/client/consensus/common/src/import_queue/mock.rs
@@ -40,7 +40,7 @@ mockall::mock! {
 	impl<B: BlockT> ImportQueue<B> for ImportQueue<B> {
 		fn service(&self) -> Box<dyn ImportQueueService<B>>;
 		fn service_ref(&mut self) -> &mut dyn ImportQueueService<B>;
-		fn poll_actions<'a>(&mut self, cx: &mut futures::task::Context<'a>, link: &mut dyn Link<B>);
-		async fn run(self, link: Box<dyn Link<B>>);
+		fn poll_actions<'a>(&mut self, cx: &mut futures::task::Context<'a>, link: &dyn Link<B>);
+		async fn run(self, link: &'__mockall_link dyn Link<B>);
 	}
 }
diff --git a/substrate/client/network/sync/src/block_relay_protocol.rs b/substrate/client/network/sync/src/block_relay_protocol.rs
index 3c5b3739e8222395b896003af8bc72e9ea68b9e7..13639d851b27241155701dbfcd5752a7d6b22639 100644
--- a/substrate/client/network/sync/src/block_relay_protocol.rs
+++ b/substrate/client/network/sync/src/block_relay_protocol.rs
@@ -21,7 +21,7 @@ use sc_network::{request_responses::RequestFailure, NetworkBackend, ProtocolName
 use sc_network_common::sync::message::{BlockData, BlockRequest};
 use sc_network_types::PeerId;
 use sp_runtime::traits::Block as BlockT;
-use std::sync::Arc;
+use std::{fmt, sync::Arc};
 
 /// The serving side of the block relay protocol. It runs a single instance
 /// of the server task that processes the incoming protocol messages.
@@ -34,7 +34,10 @@ pub trait BlockServer<Block: BlockT>: Send {
 /// The client side stub to download blocks from peers. This is a handle
 /// that can be used to initiate concurrent downloads.
 #[async_trait::async_trait]
-pub trait BlockDownloader<Block: BlockT>: Send + Sync {
+pub trait BlockDownloader<Block: BlockT>: fmt::Debug + Send + Sync {
+	/// Protocol name used by block downloader.
+	fn protocol_name(&self) -> &ProtocolName;
+
 	/// Performs the protocol specific sequence to fetch the blocks from the peer.
 	/// Output: if the download succeeds, the response is a `Vec<u8>` which is
 	/// in a format specific to the protocol implementation. The block data
diff --git a/substrate/client/network/sync/src/block_request_handler.rs b/substrate/client/network/sync/src/block_request_handler.rs
index 6e970b3993106958a2b2bafbd28ae0bc11afad2f..80234170bc203a9d6823dc8a5352e3ac51b6abd8 100644
--- a/substrate/client/network/sync/src/block_request_handler.rs
+++ b/substrate/client/network/sync/src/block_request_handler.rs
@@ -502,6 +502,7 @@ enum HandleRequestError {
 }
 
 /// The full block downloader implementation of [`BlockDownloader].
+#[derive(Debug)]
 pub struct FullBlockDownloader {
 	protocol_name: ProtocolName,
 	network: NetworkServiceHandle,
@@ -576,6 +577,10 @@ impl FullBlockDownloader {
 
 #[async_trait::async_trait]
 impl<B: BlockT> BlockDownloader<B> for FullBlockDownloader {
+	fn protocol_name(&self) -> &ProtocolName {
+		&self.protocol_name
+	}
+
 	async fn download_blocks(
 		&self,
 		who: PeerId,
diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs
index dceea9954c6e9ff696d9ee01098713857984ed41..cc2089d1974c37f1444aa5c8cdcdc122a259f78f 100644
--- a/substrate/client/network/sync/src/engine.rs
+++ b/substrate/client/network/sync/src/engine.rs
@@ -23,30 +23,22 @@ use crate::{
 	block_announce_validator::{
 		BlockAnnounceValidationResult, BlockAnnounceValidator as BlockAnnounceValidatorStream,
 	},
-	block_relay_protocol::{BlockDownloader, BlockResponseError},
 	pending_responses::{PendingResponses, ResponseEvent},
-	schema::v1::{StateRequest, StateResponse},
 	service::{
 		self,
 		syncing_service::{SyncingService, ToServiceCommand},
 	},
-	strategy::{
-		warp::{EncodedProof, WarpProofRequest},
-		StrategyKey, SyncingAction, SyncingStrategy,
-	},
-	types::{
-		BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent,
-	},
+	strategy::{SyncingAction, SyncingStrategy},
+	types::{BadPeer, ExtendedPeerInfo, SyncEvent},
 	LOG_TARGET,
 };
 
 use codec::{Decode, DecodeAll, Encode};
-use futures::{channel::oneshot, FutureExt, StreamExt};
+use futures::{channel::oneshot, StreamExt};
 use log::{debug, error, trace, warn};
 use prometheus_endpoint::{
 	register, Counter, Gauge, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64,
 };
-use prost::Message;
 use schnellru::{ByLength, LruMap};
 use tokio::time::{Interval, MissedTickBehavior};
 
@@ -55,7 +47,7 @@ use sc_consensus::{import_queue::ImportQueueService, IncomingBlock};
 use sc_network::{
 	config::{FullNetworkConfiguration, NotificationHandshake, ProtocolId, SetConfig},
 	peer_store::PeerStoreProvider,
-	request_responses::{IfDisconnected, OutboundFailure, RequestFailure},
+	request_responses::{OutboundFailure, RequestFailure},
 	service::{
 		traits::{Direction, NotificationConfig, NotificationEvent, ValidationResult},
 		NotificationMetrics,
@@ -66,7 +58,7 @@ use sc_network::{
 };
 use sc_network_common::{
 	role::Roles,
-	sync::message::{BlockAnnounce, BlockAnnouncesHandshake, BlockRequest, BlockState},
+	sync::message::{BlockAnnounce, BlockAnnouncesHandshake, BlockState},
 };
 use sc_network_types::PeerId;
 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
@@ -102,8 +94,6 @@ mod rep {
 	pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch");
 	/// Peer send us a block announcement that failed at validation.
 	pub const BAD_BLOCK_ANNOUNCEMENT: Rep = Rep::new(-(1 << 12), "Bad block announcement");
-	/// We received a message that failed to decode.
-	pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message");
 	/// Peer is on unsupported protocol version.
 	pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol");
 	/// Reputation change when a peer refuses a request.
@@ -264,10 +254,7 @@ pub struct SyncingEngine<B: BlockT, Client> {
 	peer_store_handle: Arc<dyn PeerStoreProvider>,
 
 	/// Pending responses
-	pending_responses: PendingResponses<B>,
-
-	/// Block downloader
-	block_downloader: Arc<dyn BlockDownloader<B>>,
+	pending_responses: PendingResponses,
 
 	/// Handle to import queue.
 	import_queue: Box<dyn ImportQueueService<B>>,
@@ -291,12 +278,11 @@ where
 		network_metrics: NotificationMetrics,
 		net_config: &FullNetworkConfiguration<B, <B as BlockT>::Hash, N>,
 		protocol_id: ProtocolId,
-		fork_id: &Option<String>,
+		fork_id: Option<&str>,
 		block_announce_validator: Box<dyn BlockAnnounceValidator<B> + Send>,
 		syncing_strategy: Box<dyn SyncingStrategy<B>>,
 		network_service: service::network::NetworkServiceHandle,
 		import_queue: Box<dyn ImportQueueService<B>>,
-		block_downloader: Arc<dyn BlockDownloader<B>>,
 		peer_store_handle: Arc<dyn PeerStoreProvider>,
 	) -> Result<(Self, SyncingService<B>, N::NotificationProtocolConfig), ClientError>
 	where
@@ -417,7 +403,6 @@ where
 					None
 				},
 				pending_responses: PendingResponses::new(),
-				block_downloader,
 				import_queue,
 			},
 			SyncingService::new(tx, num_connected, is_major_syncing),
@@ -583,57 +568,42 @@ where
 	}
 
 	fn process_strategy_actions(&mut self) -> Result<(), ClientError> {
-		for action in self.strategy.actions()? {
+		for action in self.strategy.actions(&self.network_service)? {
 			match action {
-				SyncingAction::SendBlockRequest { peer_id, key, request } => {
-					// Sending block request implies dropping obsolete pending response as we are
-					// not interested in it anymore (see [`SyncingAction::SendBlockRequest`]).
-					let removed = self.pending_responses.remove(peer_id, key);
-					self.send_block_request(peer_id, key, request.clone());
-
-					if removed {
-						warn!(
-							target: LOG_TARGET,
-							"Processed `ChainSyncAction::SendBlockRequest` to {} from {:?} with {:?}. \
-							 Stale response removed!",
-							peer_id,
-							key,
-							request,
-						)
-					} else {
+				SyncingAction::StartRequest { peer_id, key, request, remove_obsolete } => {
+					if !self.peers.contains_key(&peer_id) {
 						trace!(
 							target: LOG_TARGET,
-							"Processed `ChainSyncAction::SendBlockRequest` to {} from {:?} with {:?}.",
-							peer_id,
-							key,
-							request,
-						)
+							"Cannot start request with strategy key {key:?} to unknown peer \
+							{peer_id}",
+						);
+						debug_assert!(false);
+						continue;
 					}
+					if remove_obsolete {
+						if self.pending_responses.remove(peer_id, key) {
+							warn!(
+								target: LOG_TARGET,
+								"Processed `SyncingAction::StartRequest` to {peer_id} with \
+								strategy key {key:?}. Stale response removed!",
+							)
+						} else {
+							trace!(
+								target: LOG_TARGET,
+								"Processed `SyncingAction::StartRequest` to {peer_id} with \
+								strategy key {key:?}.",
+							)
+						}
+					}
+
+					self.pending_responses.insert(peer_id, key, request);
 				},
 				SyncingAction::CancelRequest { peer_id, key } => {
 					let removed = self.pending_responses.remove(peer_id, key);
 
 					trace!(
 						target: LOG_TARGET,
-						"Processed {action:?}, response removed: {removed}.",
-					);
-				},
-				SyncingAction::SendStateRequest { peer_id, key, protocol_name, request } => {
-					self.send_state_request(peer_id, key, protocol_name, request);
-
-					trace!(
-						target: LOG_TARGET,
-						"Processed `ChainSyncAction::SendStateRequest` to {peer_id}.",
-					);
-				},
-				SyncingAction::SendWarpProofRequest { peer_id, key, protocol_name, request } => {
-					self.send_warp_proof_request(peer_id, key, protocol_name, request.clone());
-
-					trace!(
-						target: LOG_TARGET,
-						"Processed `ChainSyncAction::SendWarpProofRequest` to {}, request: {:?}.",
-						peer_id,
-						request,
+						"Processed `SyncingAction::CancelRequest`, response removed: {removed}.",
 					);
 				},
 				SyncingAction::DropPeer(BadPeer(peer_id, rep)) => {
@@ -1000,160 +970,12 @@ where
 		Ok(())
 	}
 
-	fn send_block_request(&mut self, peer_id: PeerId, key: StrategyKey, request: BlockRequest<B>) {
-		if !self.peers.contains_key(&peer_id) {
-			trace!(target: LOG_TARGET, "Cannot send block request to unknown peer {peer_id}");
-			debug_assert!(false);
-			return;
-		}
-
-		let downloader = self.block_downloader.clone();
-
-		self.pending_responses.insert(
-			peer_id,
-			key,
-			PeerRequest::Block(request.clone()),
-			async move { downloader.download_blocks(peer_id, request).await }.boxed(),
-		);
-	}
-
-	fn send_state_request(
-		&mut self,
-		peer_id: PeerId,
-		key: StrategyKey,
-		protocol_name: ProtocolName,
-		request: OpaqueStateRequest,
-	) {
-		if !self.peers.contains_key(&peer_id) {
-			trace!(target: LOG_TARGET, "Cannot send state request to unknown peer {peer_id}");
-			debug_assert!(false);
-			return;
-		}
-
-		let (tx, rx) = oneshot::channel();
-
-		self.pending_responses.insert(peer_id, key, PeerRequest::State, rx.boxed());
-
-		match Self::encode_state_request(&request) {
-			Ok(data) => {
-				self.network_service.start_request(
-					peer_id,
-					protocol_name,
-					data,
-					tx,
-					IfDisconnected::ImmediateError,
-				);
-			},
-			Err(err) => {
-				log::warn!(
-					target: LOG_TARGET,
-					"Failed to encode state request {request:?}: {err:?}",
-				);
-			},
-		}
-	}
-
-	fn send_warp_proof_request(
-		&mut self,
-		peer_id: PeerId,
-		key: StrategyKey,
-		protocol_name: ProtocolName,
-		request: WarpProofRequest<B>,
-	) {
-		if !self.peers.contains_key(&peer_id) {
-			trace!(target: LOG_TARGET, "Cannot send warp proof request to unknown peer {peer_id}");
-			debug_assert!(false);
-			return;
-		}
-
-		let (tx, rx) = oneshot::channel();
-
-		self.pending_responses.insert(peer_id, key, PeerRequest::WarpProof, rx.boxed());
-
-		self.network_service.start_request(
-			peer_id,
-			protocol_name,
-			request.encode(),
-			tx,
-			IfDisconnected::ImmediateError,
-		);
-	}
-
-	fn encode_state_request(request: &OpaqueStateRequest) -> Result<Vec<u8>, String> {
-		let request: &StateRequest = request.0.downcast_ref().ok_or_else(|| {
-			"Failed to downcast opaque state response during encoding, this is an \
-				implementation bug."
-				.to_string()
-		})?;
-
-		Ok(request.encode_to_vec())
-	}
-
-	fn decode_state_response(response: &[u8]) -> Result<OpaqueStateResponse, String> {
-		let response = StateResponse::decode(response)
-			.map_err(|error| format!("Failed to decode state response: {error}"))?;
-
-		Ok(OpaqueStateResponse(Box::new(response)))
-	}
-
-	fn process_response_event(&mut self, response_event: ResponseEvent<B>) {
-		let ResponseEvent { peer_id, key, request, response } = response_event;
+	fn process_response_event(&mut self, response_event: ResponseEvent) {
+		let ResponseEvent { peer_id, key, response: response_result } = response_event;
 
-		match response {
-			Ok(Ok((resp, _))) => match request {
-				PeerRequest::Block(req) => {
-					match self.block_downloader.block_response_into_blocks(&req, resp) {
-						Ok(blocks) => {
-							self.strategy.on_block_response(peer_id, key, req, blocks);
-						},
-						Err(BlockResponseError::DecodeFailed(e)) => {
-							debug!(
-								target: LOG_TARGET,
-								"Failed to decode block response from peer {:?}: {:?}.",
-								peer_id,
-								e
-							);
-							self.network_service.report_peer(peer_id, rep::BAD_MESSAGE);
-							self.network_service.disconnect_peer(
-								peer_id,
-								self.block_announce_protocol_name.clone(),
-							);
-							return;
-						},
-						Err(BlockResponseError::ExtractionFailed(e)) => {
-							debug!(
-								target: LOG_TARGET,
-								"Failed to extract blocks from peer response {:?}: {:?}.",
-								peer_id,
-								e
-							);
-							self.network_service.report_peer(peer_id, rep::BAD_MESSAGE);
-							return;
-						},
-					}
-				},
-				PeerRequest::State => {
-					let response = match Self::decode_state_response(&resp[..]) {
-						Ok(proto) => proto,
-						Err(e) => {
-							debug!(
-								target: LOG_TARGET,
-								"Failed to decode state response from peer {peer_id:?}: {e:?}.",
-							);
-							self.network_service.report_peer(peer_id, rep::BAD_MESSAGE);
-							self.network_service.disconnect_peer(
-								peer_id,
-								self.block_announce_protocol_name.clone(),
-							);
-							return;
-						},
-					};
-
-					self.strategy.on_state_response(peer_id, key, response);
-				},
-				PeerRequest::WarpProof => {
-					self.strategy.on_warp_proof_response(&peer_id, key, EncodedProof(resp));
-				},
+		match response_result {
+			Ok(Ok((response, protocol_name))) => {
+				self.strategy.on_generic_response(&peer_id, key, protocol_name, response);
 			},
 			Ok(Err(e)) => {
 				debug!(target: LOG_TARGET, "Request to peer {peer_id:?} failed: {e:?}.");
@@ -1214,7 +1036,7 @@ where
 	/// Get config for the block announcement protocol
 	fn get_block_announce_proto_config<N: NetworkBackend<B, <B as BlockT>::Hash>>(
 		protocol_id: ProtocolId,
-		fork_id: &Option<String>,
+		fork_id: Option<&str>,
 		roles: Roles,
 		best_number: NumberFor<B>,
 		best_hash: B::Hash,
@@ -1225,7 +1047,7 @@ where
 	) -> (N::NotificationProtocolConfig, Box<dyn NotificationService>) {
 		let block_announces_protocol = {
 			let genesis_hash = genesis_hash.as_ref();
-			if let Some(ref fork_id) = fork_id {
+			if let Some(fork_id) = fork_id {
 				format!(
 					"/{}/{}/block-announces/1",
 					array_bytes::bytes2hex("", genesis_hash),
diff --git a/substrate/client/network/sync/src/mock.rs b/substrate/client/network/sync/src/mock.rs
index 741fa7139583f0453d97810cba071fb37ee6e5ee..bf25156f9703dc67d3ff84edf1599c70995c45ae 100644
--- a/substrate/client/network/sync/src/mock.rs
+++ b/substrate/client/network/sync/src/mock.rs
@@ -27,10 +27,13 @@ use sc_network_types::PeerId;
 use sp_runtime::traits::Block as BlockT;
 
 mockall::mock! {
+	#[derive(Debug)]
 	pub BlockDownloader<Block: BlockT> {}
 
 	#[async_trait::async_trait]
 	impl<Block: BlockT> BlockDownloaderT<Block> for BlockDownloader<Block> {
+		fn protocol_name(&self) -> &ProtocolName;
+
 		async fn download_blocks(
 			&self,
 			who: PeerId,
diff --git a/substrate/client/network/sync/src/pending_responses.rs b/substrate/client/network/sync/src/pending_responses.rs
index 7d2d598a2e061b5daa252dd12076cc0534bb44c8..46e6ae62632819f176a68a3d0668ff9c8c921fb5 100644
--- a/substrate/client/network/sync/src/pending_responses.rs
+++ b/substrate/client/network/sync/src/pending_responses.rs
@@ -19,7 +19,7 @@
 //! [`PendingResponses`] is responsible for keeping track of pending responses and
 //! polling them. [`Stream`] implemented by [`PendingResponses`] never terminates.
 
-use crate::{strategy::StrategyKey, types::PeerRequest, LOG_TARGET};
+use crate::{strategy::StrategyKey, LOG_TARGET};
 use futures::{
 	channel::oneshot,
 	future::BoxFuture,
@@ -27,61 +27,49 @@ use futures::{
 	FutureExt, StreamExt,
 };
 use log::error;
+use std::any::Any;
 
 use sc_network::{request_responses::RequestFailure, types::ProtocolName};
 use sc_network_types::PeerId;
-use sp_runtime::traits::Block as BlockT;
 use std::task::{Context, Poll, Waker};
 use tokio_stream::StreamMap;
 
 /// Response result.
-type ResponseResult = Result<Result<(Vec<u8>, ProtocolName), RequestFailure>, oneshot::Canceled>;
+type ResponseResult =
+	Result<Result<(Box<dyn Any + Send>, ProtocolName), RequestFailure>, oneshot::Canceled>;
 
 /// A future yielding [`ResponseResult`].
-type ResponseFuture = BoxFuture<'static, ResponseResult>;
+pub(crate) type ResponseFuture = BoxFuture<'static, ResponseResult>;
 
 /// An event we receive once a pending response future resolves.
-pub(crate) struct ResponseEvent<B: BlockT> {
+pub(crate) struct ResponseEvent {
 	pub peer_id: PeerId,
 	pub key: StrategyKey,
-	pub request: PeerRequest<B>,
 	pub response: ResponseResult,
 }
 
 /// Stream taking care of polling pending responses.
-pub(crate) struct PendingResponses<B: BlockT> {
+pub(crate) struct PendingResponses {
 	/// Pending responses
-	pending_responses:
-		StreamMap<(PeerId, StrategyKey), BoxStream<'static, (PeerRequest<B>, ResponseResult)>>,
+	pending_responses: StreamMap<(PeerId, StrategyKey), BoxStream<'static, ResponseResult>>,
 	/// Waker to implement never terminating stream
 	waker: Option<Waker>,
 }
 
-impl<B: BlockT> PendingResponses<B> {
+impl PendingResponses {
 	pub fn new() -> Self {
 		Self { pending_responses: StreamMap::new(), waker: None }
 	}
 
-	pub fn insert(
-		&mut self,
-		peer_id: PeerId,
-		key: StrategyKey,
-		request: PeerRequest<B>,
-		response_future: ResponseFuture,
-	) {
-		let request_type = request.get_type();
-
+	pub fn insert(&mut self, peer_id: PeerId, key: StrategyKey, response_future: ResponseFuture) {
 		if self
 			.pending_responses
-			.insert(
-				(peer_id, key),
-				Box::pin(async move { (request, response_future.await) }.into_stream()),
-			)
+			.insert((peer_id, key), Box::pin(response_future.into_stream()))
 			.is_some()
 		{
 			error!(
 				target: LOG_TARGET,
-				"Discarded pending response from peer {peer_id}, request type: {request_type:?}.",
+				"Discarded pending response from peer {peer_id}, strategy key: {key:?}.",
 			);
 			debug_assert!(false);
 		}
@@ -112,21 +100,21 @@ impl<B: BlockT> PendingResponses<B> {
 	}
 }
 
-impl<B: BlockT> Stream for PendingResponses<B> {
-	type Item = ResponseEvent<B>;
+impl Stream for PendingResponses {
+	type Item = ResponseEvent;
 
 	fn poll_next(
 		mut self: std::pin::Pin<&mut Self>,
 		cx: &mut Context<'_>,
 	) -> Poll<Option<Self::Item>> {
 		match self.pending_responses.poll_next_unpin(cx) {
-			Poll::Ready(Some(((peer_id, key), (request, response)))) => {
+			Poll::Ready(Some(((peer_id, key), response))) => {
 				// We need to manually remove the stream, because `StreamMap` doesn't know yet that
 				// it's going to yield `None`, so may not remove it before the next request is made
 				// to the same peer.
 				self.pending_responses.remove(&(peer_id, key));
 
-				Poll::Ready(Some(ResponseEvent { peer_id, key, request, response }))
+				Poll::Ready(Some(ResponseEvent { peer_id, key, response }))
 			},
 			Poll::Ready(None) | Poll::Pending => {
 				self.waker = Some(cx.waker().clone());
@@ -138,7 +126,7 @@ impl<B: BlockT> Stream for PendingResponses<B> {
 }
 
 // As [`PendingResponses`] never terminates, we can easily implement [`FusedStream`] for it.
-impl<B: BlockT> FusedStream for PendingResponses<B> {
+impl FusedStream for PendingResponses {
 	fn is_terminated(&self) -> bool {
 		false
 	}
diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs
index 141edc7c884144455e43f5b7c5b2b62ae246bbab..300aa076515f8095ba19faa8149062cd97c5325a 100644
--- a/substrate/client/network/sync/src/service/mock.rs
+++ b/substrate/client/network/sync/src/service/mock.rs
@@ -45,19 +45,19 @@ mockall::mock! {
 
 	impl<B: BlockT> sc_consensus::Link<B> for ChainSyncInterface<B> {
 		fn blocks_processed(
-			&mut self,
+			&self,
 			imported: usize,
 			count: usize,
 			results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
 		);
 		fn justification_imported(
-			&mut self,
+			&self,
 			who: PeerId,
 			hash: &B::Hash,
 			number: NumberFor<B>,
 			success: bool,
 		);
-		fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>);
+		fn request_justification(&self, hash: &B::Hash, number: NumberFor<B>);
 	}
 }
 
diff --git a/substrate/client/network/sync/src/service/network.rs b/substrate/client/network/sync/src/service/network.rs
index e848b5f62c1b8ab9309bf71029862f0548432097..139e1a986a927b757ac4fc6eed74323b5c7573a0 100644
--- a/substrate/client/network/sync/src/service/network.rs
+++ b/substrate/client/network/sync/src/service/network.rs
@@ -39,9 +39,11 @@ impl<T> Network for T where T: NetworkPeers + NetworkRequest {}
 /// calls the `NetworkService` on its behalf.
 pub struct NetworkServiceProvider {
 	rx: TracingUnboundedReceiver<ToServiceCommand>,
+	handle: NetworkServiceHandle,
 }
 
 /// Commands that `ChainSync` wishes to send to `NetworkService`
+#[derive(Debug)]
 pub enum ToServiceCommand {
 	/// Call `NetworkPeers::disconnect_peer()`
 	DisconnectPeer(PeerId, ProtocolName),
@@ -61,7 +63,7 @@ pub enum ToServiceCommand {
 
 /// Handle that is (temporarily) passed to `ChainSync` so it can
 /// communicate with `NetworkService` through `SyncingEngine`
-#[derive(Clone)]
+#[derive(Debug, Clone)]
 pub struct NetworkServiceHandle {
 	tx: TracingUnboundedSender<ToServiceCommand>,
 }
@@ -99,15 +101,23 @@ impl NetworkServiceHandle {
 
 impl NetworkServiceProvider {
 	/// Create new `NetworkServiceProvider`
-	pub fn new() -> (Self, NetworkServiceHandle) {
+	pub fn new() -> Self {
 		let (tx, rx) = tracing_unbounded("mpsc_network_service_provider", 100_000);
 
-		(Self { rx }, NetworkServiceHandle::new(tx))
+		Self { rx, handle: NetworkServiceHandle::new(tx) }
+	}
+
+	/// Get handle to talk to the provider
+	pub fn handle(&self) -> NetworkServiceHandle {
+		self.handle.clone()
 	}
 
 	/// Run the `NetworkServiceProvider`
-	pub async fn run(mut self, service: Arc<dyn Network + Send + Sync>) {
-		while let Some(inner) = self.rx.next().await {
+	pub async fn run(self, service: Arc<dyn Network + Send + Sync>) {
+		let Self { mut rx, handle } = self;
+		drop(handle);
+
+		while let Some(inner) = rx.next().await {
 			match inner {
 				ToServiceCommand::DisconnectPeer(peer, protocol_name) =>
 					service.disconnect_peer(peer, protocol_name),
@@ -129,7 +139,8 @@ mod tests {
 	// and then reported
 	#[tokio::test]
 	async fn disconnect_and_report_peer() {
-		let (provider, handle) = NetworkServiceProvider::new();
+		let provider = NetworkServiceProvider::new();
+		let handle = provider.handle();
 
 		let peer = PeerId::random();
 		let proto = ProtocolName::from("test-protocol");
diff --git a/substrate/client/network/sync/src/service/syncing_service.rs b/substrate/client/network/sync/src/service/syncing_service.rs
index 08a2b36118a9d4d7d1b780c62a848a789562babf..b56af2b9976a176d54ba2dda287dd7539ddaf160 100644
--- a/substrate/client/network/sync/src/service/syncing_service.rs
+++ b/substrate/client/network/sync/src/service/syncing_service.rs
@@ -177,7 +177,7 @@ impl<B: BlockT> SyncStatusProvider<B> for SyncingService<B> {
 
 impl<B: BlockT> Link<B> for SyncingService<B> {
 	fn blocks_processed(
-		&mut self,
+		&self,
 		imported: usize,
 		count: usize,
 		results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
@@ -188,7 +188,7 @@ impl<B: BlockT> Link<B> for SyncingService<B> {
 	}
 
 	fn justification_imported(
-		&mut self,
+		&self,
 		who: PeerId,
 		hash: &B::Hash,
 		number: NumberFor<B>,
@@ -199,7 +199,7 @@ impl<B: BlockT> Link<B> for SyncingService<B> {
 			.unbounded_send(ToServiceCommand::JustificationImported(who, *hash, number, success));
 	}
 
-	fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
+	fn request_justification(&self, hash: &B::Hash, number: NumberFor<B>) {
 		let _ = self.tx.unbounded_send(ToServiceCommand::RequestJustification(*hash, number));
 	}
 }
diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs
index 81998b7576bbfb807fbfc8c8e1d79ff087ff8906..cdc6de1f8c657ed84c083a1de3dd095b58b01c24 100644
--- a/substrate/client/network/sync/src/strategy.rs
+++ b/substrate/client/network/sync/src/strategy.rs
@@ -16,50 +16,35 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-//! [`PolkadotSyncingStrategy`] is a proxy between [`crate::engine::SyncingEngine`]
-//! and specific syncing algorithms.
+//! [`SyncingStrategy`] defines the interface that [`crate::engine::SyncingEngine`] uses to drive
+//! a specific syncing algorithm.
+//!
+//! A few different strategies are provided by Substrate out of the box with custom strategies
+//! possible too.
 
 pub mod chain_sync;
 mod disconnected_peers;
+pub mod polkadot;
 mod state;
 pub mod state_sync;
 pub mod warp;
 
 use crate::{
-	block_request_handler::MAX_BLOCKS_IN_RESPONSE,
-	types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncStatus},
-	LOG_TARGET,
+	pending_responses::ResponseFuture,
+	service::network::NetworkServiceHandle,
+	types::{BadPeer, SyncStatus},
 };
-use chain_sync::{ChainSync, ChainSyncMode};
-use log::{debug, error, info};
-use prometheus_endpoint::Registry;
-use sc_client_api::{BlockBackend, ProofProvider};
 use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
 use sc_network::ProtocolName;
-use sc_network_common::sync::{
-	message::{BlockAnnounce, BlockData, BlockRequest},
-	SyncMode,
-};
+use sc_network_common::sync::message::BlockAnnounce;
 use sc_network_types::PeerId;
-use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata};
+use sp_blockchain::Error as ClientError;
 use sp_consensus::BlockOrigin;
 use sp_runtime::{
-	traits::{Block as BlockT, Header, NumberFor},
+	traits::{Block as BlockT, NumberFor},
 	Justifications,
 };
-use state::{StateStrategy, StateStrategyAction};
-use std::{collections::HashMap, sync::Arc};
-use warp::{EncodedProof, WarpProofRequest, WarpSync, WarpSyncAction, WarpSyncConfig};
-
-/// Corresponding `ChainSync` mode.
-fn chain_sync_mode(sync_mode: SyncMode) -> ChainSyncMode {
-	match sync_mode {
-		SyncMode::Full => ChainSyncMode::Full,
-		SyncMode::LightState { skip_proofs, storage_chain_mode } =>
-			ChainSyncMode::LightState { skip_proofs, storage_chain_mode },
-		SyncMode::Warp => ChainSyncMode::Full,
-	}
-}
+use std::any::Any;
 
 /// Syncing strategy for syncing engine to use
 pub trait SyncingStrategy<B: BlockT>: Send
@@ -101,29 +86,16 @@ where
 	/// Report a justification import (successful or not).
 	fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor<B>, success: bool);
 
-	/// Process block response.
-	fn on_block_response(
-		&mut self,
-		peer_id: PeerId,
-		key: StrategyKey,
-		request: BlockRequest<B>,
-		blocks: Vec<BlockData<B>>,
-	);
-
-	/// Process state response.
-	fn on_state_response(
-		&mut self,
-		peer_id: PeerId,
-		key: StrategyKey,
-		response: OpaqueStateResponse,
-	);
-
-	/// Process warp proof response.
-	fn on_warp_proof_response(
+	/// Process generic response.
+	///
+	/// The strategy creates the opaque response and must be able to downcast it back into the
+	/// concrete type internally. Failure to downcast is an implementation bug.
+	fn on_generic_response(
 		&mut self,
 		peer_id: &PeerId,
 		key: StrategyKey,
-		response: EncodedProof,
+		protocol_name: ProtocolName,
+		response: Box<dyn Any + Send>,
 	);
 
 	/// A batch of blocks that have been processed, with or without errors.
@@ -160,52 +132,32 @@ where
 
 	/// Get actions that should be performed by the owner on the strategy's behalf
 	#[must_use]
-	fn actions(&mut self) -> Result<Vec<SyncingAction<B>>, ClientError>;
-}
-
-/// Syncing configuration containing data for all strategies.
-#[derive(Clone, Debug)]
-pub struct SyncingConfig {
-	/// Syncing mode.
-	pub mode: SyncMode,
-	/// The number of parallel downloads to guard against slow peers.
-	pub max_parallel_downloads: u32,
-	/// Maximum number of blocks to request.
-	pub max_blocks_per_request: u32,
-	/// Prometheus metrics registry.
-	pub metrics_registry: Option<Registry>,
-	/// Protocol name used to send out state requests
-	pub state_request_protocol_name: ProtocolName,
+	fn actions(
+		&mut self,
+		// TODO: Consider making this an internal property of the strategy.
+		network_service: &NetworkServiceHandle,
+	) -> Result<Vec<SyncingAction<B>>, ClientError>;
 }
 
 /// The key identifying a specific strategy for responses routing.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
-pub enum StrategyKey {
-	/// Warp sync initiated this request.
-	Warp,
-	/// State sync initiated this request.
-	State,
-	/// `ChainSync` initiated this request.
-	ChainSync,
+pub struct StrategyKey(&'static str);
+
+impl StrategyKey {
+	/// Instantiate opaque strategy key.
+	pub const fn new(key: &'static str) -> Self {
+		Self(key)
+	}
 }
 
-#[derive(Debug)]
 pub enum SyncingAction<B: BlockT> {
-	/// Send block request to peer. Always implies dropping a stale block request to the same peer.
-	SendBlockRequest { peer_id: PeerId, key: StrategyKey, request: BlockRequest<B> },
-	/// Send state request to peer.
-	SendStateRequest {
-		peer_id: PeerId,
-		key: StrategyKey,
-		protocol_name: ProtocolName,
-		request: OpaqueStateRequest,
-	},
-	/// Send warp proof request to peer.
-	SendWarpProofRequest {
+	/// Start request to peer.
+	StartRequest {
 		peer_id: PeerId,
 		key: StrategyKey,
-		protocol_name: ProtocolName,
-		request: WarpProofRequest<B>,
+		request: ResponseFuture,
+		// Whether to remove obsolete pending responses.
+		remove_obsolete: bool,
 	},
 	/// Drop stale request.
 	CancelRequest { peer_id: PeerId, key: StrategyKey },
@@ -228,441 +180,16 @@ impl<B: BlockT> SyncingAction<B> {
 	fn is_finished(&self) -> bool {
 		matches!(self, SyncingAction::Finished)
 	}
-}
-
-impl<B: BlockT> From<WarpSyncAction<B>> for SyncingAction<B> {
-	fn from(action: WarpSyncAction<B>) -> Self {
-		match action {
-			WarpSyncAction::SendWarpProofRequest { peer_id, protocol_name, request } =>
-				SyncingAction::SendWarpProofRequest {
-					peer_id,
-					key: StrategyKey::Warp,
-					protocol_name,
-					request,
-				},
-			WarpSyncAction::SendBlockRequest { peer_id, request } =>
-				SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::Warp, request },
-			WarpSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
-			WarpSyncAction::Finished => SyncingAction::Finished,
-		}
-	}
-}
-
-impl<B: BlockT> From<StateStrategyAction<B>> for SyncingAction<B> {
-	fn from(action: StateStrategyAction<B>) -> Self {
-		match action {
-			StateStrategyAction::SendStateRequest { peer_id, protocol_name, request } =>
-				SyncingAction::SendStateRequest {
-					peer_id,
-					key: StrategyKey::State,
-					protocol_name,
-					request,
-				},
-			StateStrategyAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
-			StateStrategyAction::ImportBlocks { origin, blocks } =>
-				SyncingAction::ImportBlocks { origin, blocks },
-			StateStrategyAction::Finished => SyncingAction::Finished,
-		}
-	}
-}
-
-/// Proxy to specific syncing strategies used in Polkadot.
-pub struct PolkadotSyncingStrategy<B: BlockT, Client> {
-	/// Initial syncing configuration.
-	config: SyncingConfig,
-	/// Client used by syncing strategies.
-	client: Arc<Client>,
-	/// Warp strategy.
-	warp: Option<WarpSync<B, Client>>,
-	/// State strategy.
-	state: Option<StateStrategy<B>>,
-	/// `ChainSync` strategy.`
-	chain_sync: Option<ChainSync<B, Client>>,
-	/// Connected peers and their best blocks used to seed a new strategy when switching to it in
-	/// `PolkadotSyncingStrategy::proceed_to_next`.
-	peer_best_blocks: HashMap<PeerId, (B::Hash, NumberFor<B>)>,
-}
-
-impl<B: BlockT, Client> SyncingStrategy<B> for PolkadotSyncingStrategy<B, Client>
-where
-	B: BlockT,
-	Client: HeaderBackend<B>
-		+ BlockBackend<B>
-		+ HeaderMetadata<B, Error = sp_blockchain::Error>
-		+ ProofProvider<B>
-		+ Send
-		+ Sync
-		+ 'static,
-{
-	fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor<B>) {
-		self.peer_best_blocks.insert(peer_id, (best_hash, best_number));
-
-		self.warp.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number));
-		self.state.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number));
-		self.chain_sync.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number));
-	}
-
-	fn remove_peer(&mut self, peer_id: &PeerId) {
-		self.warp.as_mut().map(|s| s.remove_peer(peer_id));
-		self.state.as_mut().map(|s| s.remove_peer(peer_id));
-		self.chain_sync.as_mut().map(|s| s.remove_peer(peer_id));
-
-		self.peer_best_blocks.remove(peer_id);
-	}
-
-	fn on_validated_block_announce(
-		&mut self,
-		is_best: bool,
-		peer_id: PeerId,
-		announce: &BlockAnnounce<B::Header>,
-	) -> Option<(B::Hash, NumberFor<B>)> {
-		let new_best = if let Some(ref mut warp) = self.warp {
-			warp.on_validated_block_announce(is_best, peer_id, announce)
-		} else if let Some(ref mut state) = self.state {
-			state.on_validated_block_announce(is_best, peer_id, announce)
-		} else if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.on_validated_block_announce(is_best, peer_id, announce)
-		} else {
-			error!(target: LOG_TARGET, "No syncing strategy is active.");
-			debug_assert!(false);
-			Some((announce.header.hash(), *announce.header.number()))
-		};
-
-		if let Some(new_best) = new_best {
-			if let Some(best) = self.peer_best_blocks.get_mut(&peer_id) {
-				*best = new_best;
-			} else {
-				debug!(
-					target: LOG_TARGET,
-					"Cannot update `peer_best_blocks` as peer {peer_id} is not known to `Strategy` \
-					 (already disconnected?)",
-				);
-			}
-		}
-
-		new_best
-	}
-
-	fn set_sync_fork_request(&mut self, peers: Vec<PeerId>, hash: &B::Hash, number: NumberFor<B>) {
-		// Fork requests are only handled by `ChainSync`.
-		if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.set_sync_fork_request(peers.clone(), hash, number);
-		}
-	}
-
-	fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
-		// Justifications can only be requested via `ChainSync`.
-		if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.request_justification(hash, number);
-		}
-	}
-
-	fn clear_justification_requests(&mut self) {
-		// Justification requests can only be cleared by `ChainSync`.
-		if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.clear_justification_requests();
-		}
-	}
-
-	fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor<B>, success: bool) {
-		// Only `ChainSync` is interested in justification import.
-		if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.on_justification_import(hash, number, success);
-		}
-	}
-
-	fn on_block_response(
-		&mut self,
-		peer_id: PeerId,
-		key: StrategyKey,
-		request: BlockRequest<B>,
-		blocks: Vec<BlockData<B>>,
-	) {
-		if let (StrategyKey::Warp, Some(ref mut warp)) = (key, &mut self.warp) {
-			warp.on_block_response(peer_id, request, blocks);
-		} else if let (StrategyKey::ChainSync, Some(ref mut chain_sync)) =
-			(key, &mut self.chain_sync)
-		{
-			chain_sync.on_block_response(peer_id, key, request, blocks);
-		} else {
-			error!(
-				target: LOG_TARGET,
-				"`on_block_response()` called with unexpected key {key:?} \
-				 or corresponding strategy is not active.",
-			);
-			debug_assert!(false);
-		}
-	}
-
-	fn on_state_response(
-		&mut self,
-		peer_id: PeerId,
-		key: StrategyKey,
-		response: OpaqueStateResponse,
-	) {
-		if let (StrategyKey::State, Some(ref mut state)) = (key, &mut self.state) {
-			state.on_state_response(peer_id, response);
-		} else if let (StrategyKey::ChainSync, Some(ref mut chain_sync)) =
-			(key, &mut self.chain_sync)
-		{
-			chain_sync.on_state_response(peer_id, key, response);
-		} else {
-			error!(
-				target: LOG_TARGET,
-				"`on_state_response()` called with unexpected key {key:?} \
-				 or corresponding strategy is not active.",
-			);
-			debug_assert!(false);
-		}
-	}
-
-	fn on_warp_proof_response(
-		&mut self,
-		peer_id: &PeerId,
-		key: StrategyKey,
-		response: EncodedProof,
-	) {
-		if let (StrategyKey::Warp, Some(ref mut warp)) = (key, &mut self.warp) {
-			warp.on_warp_proof_response(peer_id, response);
-		} else {
-			error!(
-				target: LOG_TARGET,
-				"`on_warp_proof_response()` called with unexpected key {key:?} \
-				 or warp strategy is not active",
-			);
-			debug_assert!(false);
-		}
-	}
-
-	fn on_blocks_processed(
-		&mut self,
-		imported: usize,
-		count: usize,
-		results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
-	) {
-		// Only `StateStrategy` and `ChainSync` are interested in block processing notifications.
-		if let Some(ref mut state) = self.state {
-			state.on_blocks_processed(imported, count, results);
-		} else if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.on_blocks_processed(imported, count, results);
-		}
-	}
-
-	fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor<B>) {
-		// Only `ChainSync` is interested in block finalization notifications.
-		if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.on_block_finalized(hash, number);
-		}
-	}
-
-	fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor<B>) {
-		// This is relevant to `ChainSync` only.
-		if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.update_chain_info(best_hash, best_number);
-		}
-	}
-
-	fn is_major_syncing(&self) -> bool {
-		self.warp.is_some() ||
-			self.state.is_some() ||
-			match self.chain_sync {
-				Some(ref s) => s.status().state.is_major_syncing(),
-				None => unreachable!("At least one syncing strategy is active; qed"),
-			}
-	}
-
-	fn num_peers(&self) -> usize {
-		self.peer_best_blocks.len()
-	}
-
-	fn status(&self) -> SyncStatus<B> {
-		// This function presumes that strategies are executed serially and must be refactored
-		// once we have parallel strategies.
-		if let Some(ref warp) = self.warp {
-			warp.status()
-		} else if let Some(ref state) = self.state {
-			state.status()
-		} else if let Some(ref chain_sync) = self.chain_sync {
-			chain_sync.status()
-		} else {
-			unreachable!("At least one syncing strategy is always active; qed")
-		}
-	}
-
-	fn num_downloaded_blocks(&self) -> usize {
-		self.chain_sync
-			.as_ref()
-			.map_or(0, |chain_sync| chain_sync.num_downloaded_blocks())
-	}
-
-	fn num_sync_requests(&self) -> usize {
-		self.chain_sync.as_ref().map_or(0, |chain_sync| chain_sync.num_sync_requests())
-	}
-
-	fn actions(&mut self) -> Result<Vec<SyncingAction<B>>, ClientError> {
-		// This function presumes that strategies are executed serially and must be refactored once
-		// we have parallel strategies.
-		let actions: Vec<_> = if let Some(ref mut warp) = self.warp {
-			warp.actions().map(Into::into).collect()
-		} else if let Some(ref mut state) = self.state {
-			state.actions().map(Into::into).collect()
-		} else if let Some(ref mut chain_sync) = self.chain_sync {
-			chain_sync.actions()?
-		} else {
-			unreachable!("At least one syncing strategy is always active; qed")
-		};
-
-		if actions.iter().any(SyncingAction::is_finished) {
-			self.proceed_to_next()?;
-		}
-
-		Ok(actions)
-	}
-}
-
-impl<B: BlockT, Client> PolkadotSyncingStrategy<B, Client>
-where
-	B: BlockT,
-	Client: HeaderBackend<B>
-		+ BlockBackend<B>
-		+ HeaderMetadata<B, Error = sp_blockchain::Error>
-		+ ProofProvider<B>
-		+ Send
-		+ Sync
-		+ 'static,
-{
-	/// Initialize a new syncing strategy.
-	pub fn new(
-		mut config: SyncingConfig,
-		client: Arc<Client>,
-		warp_sync_config: Option<WarpSyncConfig<B>>,
-		warp_sync_protocol_name: Option<ProtocolName>,
-	) -> Result<Self, ClientError> {
-		if config.max_blocks_per_request > MAX_BLOCKS_IN_RESPONSE as u32 {
-			info!(
-				target: LOG_TARGET,
-				"clamping maximum blocks per request to {MAX_BLOCKS_IN_RESPONSE}",
-			);
-			config.max_blocks_per_request = MAX_BLOCKS_IN_RESPONSE as u32;
-		}
-
-		if let SyncMode::Warp = config.mode {
-			let warp_sync_config = warp_sync_config
-				.expect("Warp sync configuration must be supplied in warp sync mode.");
-			let warp_sync =
-				WarpSync::new(client.clone(), warp_sync_config, warp_sync_protocol_name);
-			Ok(Self {
-				config,
-				client,
-				warp: Some(warp_sync),
-				state: None,
-				chain_sync: None,
-				peer_best_blocks: Default::default(),
-			})
-		} else {
-			let chain_sync = ChainSync::new(
-				chain_sync_mode(config.mode),
-				client.clone(),
-				config.max_parallel_downloads,
-				config.max_blocks_per_request,
-				config.state_request_protocol_name.clone(),
-				config.metrics_registry.as_ref(),
-				std::iter::empty(),
-			)?;
-			Ok(Self {
-				config,
-				client,
-				warp: None,
-				state: None,
-				chain_sync: Some(chain_sync),
-				peer_best_blocks: Default::default(),
-			})
-		}
-	}
-
-	/// Proceed with the next strategy if the active one finished.
-	pub fn proceed_to_next(&mut self) -> Result<(), ClientError> {
-		// The strategies are switched as `WarpSync` -> `StateStrategy` -> `ChainSync`.
-		if let Some(ref mut warp) = self.warp {
-			match warp.take_result() {
-				Some(res) => {
-					info!(
-						target: LOG_TARGET,
-						"Warp sync is complete, continuing with state sync."
-					);
-					let state_sync = StateStrategy::new(
-						self.client.clone(),
-						res.target_header,
-						res.target_body,
-						res.target_justifications,
-						false,
-						self.peer_best_blocks
-							.iter()
-							.map(|(peer_id, (_, best_number))| (*peer_id, *best_number)),
-						self.config.state_request_protocol_name.clone(),
-					);
-
-					self.warp = None;
-					self.state = Some(state_sync);
-					Ok(())
-				},
-				None => {
-					error!(
-						target: LOG_TARGET,
-						"Warp sync failed. Continuing with full sync."
-					);
-					let chain_sync = match ChainSync::new(
-						chain_sync_mode(self.config.mode),
-						self.client.clone(),
-						self.config.max_parallel_downloads,
-						self.config.max_blocks_per_request,
-						self.config.state_request_protocol_name.clone(),
-						self.config.metrics_registry.as_ref(),
-						self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| {
-							(*peer_id, *best_hash, *best_number)
-						}),
-					) {
-						Ok(chain_sync) => chain_sync,
-						Err(e) => {
-							error!(target: LOG_TARGET, "Failed to start `ChainSync`.");
-							return Err(e)
-						},
-					};
-
-					self.warp = None;
-					self.chain_sync = Some(chain_sync);
-					Ok(())
-				},
-			}
-		} else if let Some(state) = &self.state {
-			if state.is_succeeded() {
-				info!(target: LOG_TARGET, "State sync is complete, continuing with block sync.");
-			} else {
-				error!(target: LOG_TARGET, "State sync failed. Falling back to full sync.");
-			}
-			let chain_sync = match ChainSync::new(
-				chain_sync_mode(self.config.mode),
-				self.client.clone(),
-				self.config.max_parallel_downloads,
-				self.config.max_blocks_per_request,
-				self.config.state_request_protocol_name.clone(),
-				self.config.metrics_registry.as_ref(),
-				self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| {
-					(*peer_id, *best_hash, *best_number)
-				}),
-			) {
-				Ok(chain_sync) => chain_sync,
-				Err(e) => {
-					error!(target: LOG_TARGET, "Failed to start `ChainSync`.");
-					return Err(e);
-				},
-			};
 
-			self.state = None;
-			self.chain_sync = Some(chain_sync);
-			Ok(())
-		} else {
-			unreachable!("Only warp & state strategies can finish; qed")
+	#[cfg(test)]
+	pub(crate) fn name(&self) -> &'static str {
+		match self {
+			Self::StartRequest { .. } => "StartRequest",
+			Self::CancelRequest { .. } => "CancelRequest",
+			Self::DropPeer(_) => "DropPeer",
+			Self::ImportBlocks { .. } => "ImportBlocks",
+			Self::ImportJustifications { .. } => "ImportJustifications",
+			Self::Finished => "Finished",
 		}
 	}
 }
diff --git a/substrate/client/network/sync/src/strategy/chain_sync.rs b/substrate/client/network/sync/src/strategy/chain_sync.rs
index 202033e8e00afc42cd3369baf6e92cd6a9716c0c..18170b77881ea4c79a2460006317e8331dab1b71 100644
--- a/substrate/client/network/sync/src/strategy/chain_sync.rs
+++ b/substrate/client/network/sync/src/strategy/chain_sync.rs
@@ -29,24 +29,28 @@
 //! order to update it.
 
 use crate::{
+	block_relay_protocol::{BlockDownloader, BlockResponseError},
 	blocks::BlockCollection,
 	justification_requests::ExtraRequests,
-	schema::v1::StateResponse,
+	schema::v1::{StateRequest, StateResponse},
+	service::network::NetworkServiceHandle,
 	strategy::{
 		disconnected_peers::DisconnectedPeers,
 		state_sync::{ImportResult, StateSync, StateSyncProvider},
-		warp::{EncodedProof, WarpSyncPhase, WarpSyncProgress},
+		warp::{WarpSyncPhase, WarpSyncProgress},
 		StrategyKey, SyncingAction, SyncingStrategy,
 	},
-	types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus},
+	types::{BadPeer, SyncState, SyncStatus},
 	LOG_TARGET,
 };
 
+use futures::{channel::oneshot, FutureExt};
 use log::{debug, error, info, trace, warn};
 use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64};
+use prost::Message;
 use sc_client_api::{blockchain::BlockGap, BlockBackend, ProofProvider};
 use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
-use sc_network::ProtocolName;
+use sc_network::{IfDisconnected, ProtocolName};
 use sc_network_common::sync::message::{
 	BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, FromBlock,
 };
@@ -62,6 +66,7 @@ use sp_runtime::{
 };
 
 use std::{
+	any::Any,
 	collections::{HashMap, HashSet},
 	ops::Range,
 	sync::Arc,
@@ -123,6 +128,9 @@ mod rep {
 
 	/// Peer response data does not have requested bits.
 	pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response");
+
+	/// We received a message that failed to decode.
+	pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message");
 }
 
 struct Metrics {
@@ -324,9 +332,11 @@ pub struct ChainSync<B: BlockT, Client> {
 	downloaded_blocks: usize,
 	/// State sync in progress, if any.
 	state_sync: Option<StateSync<B, Client>>,
-	/// Enable importing existing blocks. This is used used after the state download to
+	/// Enable importing existing blocks. This is used after the state download to
 	/// catch up to the latest state while re-importing blocks.
 	import_existing: bool,
+	/// Block downloader
+	block_downloader: Arc<dyn BlockDownloader<B>>,
 	/// Gap download process.
 	gap_sync: Option<GapSync<B>>,
 	/// Pending actions.
@@ -348,11 +358,10 @@ where
 {
 	fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor<B>) {
 		match self.add_peer_inner(peer_id, best_hash, best_number) {
-			Ok(Some(request)) => self.actions.push(SyncingAction::SendBlockRequest {
-				peer_id,
-				key: StrategyKey::ChainSync,
-				request,
-			}),
+			Ok(Some(request)) => {
+				let action = self.create_block_request_action(peer_id, request);
+				self.actions.push(action);
+			},
 			Ok(None) => {},
 			Err(bad_peer) => self.actions.push(SyncingAction::DropPeer(bad_peer)),
 		}
@@ -564,82 +573,77 @@ where
 		self.allowed_requests.set_all();
 	}
 
-	fn on_block_response(
+	fn on_generic_response(
 		&mut self,
-		peer_id: PeerId,
+		peer_id: &PeerId,
 		key: StrategyKey,
-		request: BlockRequest<B>,
-		blocks: Vec<BlockData<B>>,
+		protocol_name: ProtocolName,
+		response: Box<dyn Any + Send>,
 	) {
-		if key != StrategyKey::ChainSync {
-			error!(
+		if Self::STRATEGY_KEY != key {
+			warn!(
 				target: LOG_TARGET,
-				"`on_block_response()` called with unexpected key {key:?} for chain sync",
+				"Unexpected generic response strategy key {key:?}, protocol {protocol_name}",
 			);
 			debug_assert!(false);
+			return;
 		}
-		let block_response = BlockResponse::<B> { id: request.id, blocks };
 
-		let blocks_range = || match (
-			block_response
-				.blocks
-				.first()
-				.and_then(|b| b.header.as_ref().map(|h| h.number())),
-			block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())),
-		) {
-			(Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last),
-			(Some(first), Some(_)) => format!(" ({})", first),
-			_ => Default::default(),
-		};
-		trace!(
-			target: LOG_TARGET,
-			"BlockResponse {} from {} with {} blocks {}",
-			block_response.id,
-			peer_id,
-			block_response.blocks.len(),
-			blocks_range(),
-		);
+		if protocol_name == self.state_request_protocol_name {
+			let Ok(response) = response.downcast::<Vec<u8>>() else {
+				warn!(target: LOG_TARGET, "Failed to downcast state response");
+				debug_assert!(false);
+				return;
+			};
 
-		let res = if request.fields == BlockAttributes::JUSTIFICATION {
-			self.on_block_justification(peer_id, block_response)
-		} else {
-			self.on_block_data(&peer_id, Some(request), block_response)
-		};
+			if let Err(bad_peer) = self.on_state_data(&peer_id, &response) {
+				self.actions.push(SyncingAction::DropPeer(bad_peer));
+			}
+		} else if &protocol_name == self.block_downloader.protocol_name() {
+			let Ok(response) = response
+				.downcast::<(BlockRequest<B>, Result<Vec<BlockData<B>>, BlockResponseError>)>()
+			else {
+				warn!(target: LOG_TARGET, "Failed to downcast block response");
+				debug_assert!(false);
+				return;
+			};
 
-		if let Err(bad_peer) = res {
-			self.actions.push(SyncingAction::DropPeer(bad_peer));
-		}
-	}
+			let (request, response) = *response;
+			let blocks = match response {
+				Ok(blocks) => blocks,
+				Err(BlockResponseError::DecodeFailed(e)) => {
+					debug!(
+						target: LOG_TARGET,
+						"Failed to decode block response from peer {:?}: {:?}.",
+						peer_id,
+						e
+					);
+					self.actions.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_MESSAGE)));
+					return;
+				},
+				Err(BlockResponseError::ExtractionFailed(e)) => {
+					debug!(
+						target: LOG_TARGET,
+						"Failed to extract blocks from peer response {:?}: {:?}.",
+						peer_id,
+						e
+					);
+					self.actions.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_MESSAGE)));
+					return;
+				},
+			};
 
-	fn on_state_response(
-		&mut self,
-		peer_id: PeerId,
-		key: StrategyKey,
-		response: OpaqueStateResponse,
-	) {
-		if key != StrategyKey::ChainSync {
-			error!(
+			if let Err(bad_peer) = self.on_block_response(peer_id, key, request, blocks) {
+				self.actions.push(SyncingAction::DropPeer(bad_peer));
+			}
+		} else {
+			warn!(
 				target: LOG_TARGET,
-				"`on_state_response()` called with unexpected key {key:?} for chain sync",
+				"Unexpected generic response protocol {protocol_name}, strategy key \
+				{key:?}",
 			);
 			debug_assert!(false);
 		}
-		if let Err(bad_peer) = self.on_state_data(&peer_id, response) {
-			self.actions.push(SyncingAction::DropPeer(bad_peer));
-		}
-	}
-
-	fn on_warp_proof_response(
-		&mut self,
-		_peer_id: &PeerId,
-		_key: StrategyKey,
-		_response: EncodedProof,
-	) {
-		error!(
-			target: LOG_TARGET,
-			"`on_warp_proof_response()` called for chain sync strategy",
-		);
-		debug_assert!(false);
 	}
 
 	fn on_blocks_processed(
@@ -863,30 +867,56 @@ where
 			.count()
 	}
 
-	fn actions(&mut self) -> Result<Vec<SyncingAction<B>>, ClientError> {
+	fn actions(
+		&mut self,
+		network_service: &NetworkServiceHandle,
+	) -> Result<Vec<SyncingAction<B>>, ClientError> {
 		if !self.peers.is_empty() && self.queue_blocks.is_empty() {
 			if let Some((hash, number, skip_proofs)) = self.pending_state_sync_attempt.take() {
 				self.attempt_state_sync(hash, number, skip_proofs);
 			}
 		}
 
-		let block_requests = self.block_requests().into_iter().map(|(peer_id, request)| {
-			SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::ChainSync, request }
-		});
+		let block_requests = self
+			.block_requests()
+			.into_iter()
+			.map(|(peer_id, request)| self.create_block_request_action(peer_id, request))
+			.collect::<Vec<_>>();
 		self.actions.extend(block_requests);
 
-		let justification_requests =
-			self.justification_requests().into_iter().map(|(peer_id, request)| {
-				SyncingAction::SendBlockRequest { peer_id, key: StrategyKey::ChainSync, request }
-			});
+		let justification_requests = self
+			.justification_requests()
+			.into_iter()
+			.map(|(peer_id, request)| self.create_block_request_action(peer_id, request))
+			.collect::<Vec<_>>();
 		self.actions.extend(justification_requests);
 
 		let state_request = self.state_request().into_iter().map(|(peer_id, request)| {
-			SyncingAction::SendStateRequest {
+			trace!(
+				target: LOG_TARGET,
+				"Created `StrategyRequest` to {peer_id}.",
+			);
+
+			let (tx, rx) = oneshot::channel();
+
+			network_service.start_request(
 				peer_id,
-				key: StrategyKey::ChainSync,
-				protocol_name: self.state_request_protocol_name.clone(),
-				request,
+				self.state_request_protocol_name.clone(),
+				request.encode_to_vec(),
+				tx,
+				IfDisconnected::ImmediateError,
+			);
+
+			SyncingAction::StartRequest {
+				peer_id,
+				key: Self::STRATEGY_KEY,
+				request: async move {
+					Ok(rx.await?.and_then(|(response, protocol_name)| {
+						Ok((Box::new(response) as Box<dyn Any + Send>, protocol_name))
+					}))
+				}
+				.boxed(),
+				remove_obsolete: false,
 			}
 		});
 		self.actions.extend(state_request);
@@ -906,6 +936,9 @@ where
 		+ Sync
 		+ 'static,
 {
+	/// Strategy key used by chain sync.
+	pub const STRATEGY_KEY: StrategyKey = StrategyKey::new("ChainSync");
+
 	/// Create a new instance.
 	pub fn new(
 		mode: ChainSyncMode,
@@ -913,6 +946,7 @@ where
 		max_parallel_downloads: u32,
 		max_blocks_per_request: u32,
 		state_request_protocol_name: ProtocolName,
+		block_downloader: Arc<dyn BlockDownloader<B>>,
 		metrics_registry: Option<&Registry>,
 		initial_peers: impl Iterator<Item = (PeerId, B::Hash, NumberFor<B>)>,
 	) -> Result<Self, ClientError> {
@@ -935,6 +969,7 @@ where
 			downloaded_blocks: 0,
 			state_sync: None,
 			import_existing: false,
+			block_downloader,
 			gap_sync: None,
 			actions: Vec::new(),
 			metrics: metrics_registry.and_then(|r| match Metrics::register(r) {
@@ -1075,6 +1110,33 @@ where
 		}
 	}
 
+	fn create_block_request_action(
+		&mut self,
+		peer_id: PeerId,
+		request: BlockRequest<B>,
+	) -> SyncingAction<B> {
+		let downloader = self.block_downloader.clone();
+
+		SyncingAction::StartRequest {
+			peer_id,
+			key: Self::STRATEGY_KEY,
+			request: async move {
+				Ok(downloader.download_blocks(peer_id, request.clone()).await?.and_then(
+					|(response, protocol_name)| {
+						let decoded_response =
+							downloader.block_response_into_blocks(&request, response);
+						let result = Box::new((request, decoded_response)) as Box<dyn Any + Send>;
+						Ok((result, protocol_name))
+					},
+				))
+			}
+			.boxed(),
+			// Sending block request implies dropping obsolete pending response as we are not
+			// interested in it anymore.
+			remove_obsolete: true,
+		}
+	}
+
 	/// Submit a block response for processing.
 	#[must_use]
 	fn on_block_data(
@@ -1248,11 +1310,8 @@ where
 								state: next_state,
 							};
 							let request = ancestry_request::<B>(next_num);
-							self.actions.push(SyncingAction::SendBlockRequest {
-								peer_id: *peer_id,
-								key: StrategyKey::ChainSync,
-								request,
-							});
+							let action = self.create_block_request_action(*peer_id, request);
+							self.actions.push(action);
 							return Ok(());
 						} else {
 							// Ancestry search is complete. Check if peer is on a stale fork unknown
@@ -1334,6 +1393,49 @@ where
 		Ok(())
 	}
 
+	fn on_block_response(
+		&mut self,
+		peer_id: &PeerId,
+		key: StrategyKey,
+		request: BlockRequest<B>,
+		blocks: Vec<BlockData<B>>,
+	) -> Result<(), BadPeer> {
+		if key != Self::STRATEGY_KEY {
+			error!(
+				target: LOG_TARGET,
+				"`on_block_response()` called with unexpected key {key:?} for chain sync",
+			);
+			debug_assert!(false);
+		}
+		let block_response = BlockResponse::<B> { id: request.id, blocks };
+
+		let blocks_range = || match (
+			block_response
+				.blocks
+				.first()
+				.and_then(|b| b.header.as_ref().map(|h| h.number())),
+			block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())),
+		) {
+			(Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last),
+			(Some(first), Some(_)) => format!(" ({})", first),
+			_ => Default::default(),
+		};
+		trace!(
+			target: LOG_TARGET,
+			"BlockResponse {} from {} with {} blocks {}",
+			block_response.id,
+			peer_id,
+			block_response.blocks.len(),
+			blocks_range(),
+		);
+
+		if request.fields == BlockAttributes::JUSTIFICATION {
+			self.on_block_justification(*peer_id, block_response)
+		} else {
+			self.on_block_data(peer_id, Some(request), block_response)
+		}
+	}
+
 	/// Submit a justification response for processing.
 	#[must_use]
 	fn on_block_justification(
@@ -1548,10 +1650,8 @@ where
 				PeerSyncState::DownloadingGap(_) |
 				PeerSyncState::DownloadingState => {
 					// Cancel a request first, as `add_peer` may generate a new request.
-					self.actions.push(SyncingAction::CancelRequest {
-						peer_id,
-						key: StrategyKey::ChainSync,
-					});
+					self.actions
+						.push(SyncingAction::CancelRequest { peer_id, key: Self::STRATEGY_KEY });
 					self.add_peer(peer_id, peer_sync.best_hash, peer_sync.best_number);
 				},
 				PeerSyncState::DownloadingJustification(_) => {
@@ -1831,7 +1931,7 @@ where
 	}
 
 	/// Get a state request scheduled by sync to be sent out (if any).
-	fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> {
+	fn state_request(&mut self) -> Option<(PeerId, StateRequest)> {
 		if self.allowed_requests.is_empty() {
 			return None;
 		}
@@ -1855,7 +1955,7 @@ where
 					let request = sync.next_request();
 					trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request);
 					self.allowed_requests.clear();
-					return Some((*id, OpaqueStateRequest(Box::new(request))));
+					return Some((*id, request));
 				}
 			}
 		}
@@ -1863,19 +1963,18 @@ where
 	}
 
 	#[must_use]
-	fn on_state_data(
-		&mut self,
-		peer_id: &PeerId,
-		response: OpaqueStateResponse,
-	) -> Result<(), BadPeer> {
-		let response: Box<StateResponse> = response.0.downcast().map_err(|_error| {
-			error!(
-				target: LOG_TARGET,
-				"Failed to downcast opaque state response, this is an implementation bug."
-			);
+	fn on_state_data(&mut self, peer_id: &PeerId, response: &[u8]) -> Result<(), BadPeer> {
+		let response = match StateResponse::decode(response) {
+			Ok(response) => response,
+			Err(error) => {
+				debug!(
+					target: LOG_TARGET,
+					"Failed to decode state response from peer {peer_id:?}: {error:?}.",
+				);
 
-			BadPeer(*peer_id, rep::BAD_RESPONSE)
-		})?;
+				return Err(BadPeer(*peer_id, rep::BAD_RESPONSE));
+			},
+		};
 
 		if let Some(peer) = self.peers.get_mut(peer_id) {
 			if let PeerSyncState::DownloadingState = peer.state {
@@ -1891,7 +1990,7 @@ where
 				response.entries.len(),
 				response.proof.len(),
 			);
-			sync.import(*response)
+			sync.import(response)
 		} else {
 			debug!(target: LOG_TARGET, "Ignored obsolete state response from {peer_id}");
 			return Err(BadPeer(*peer_id, rep::NOT_REQUESTED));
diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs
index d13f034e2e8da25df6171c0e21c607d4af722bc2..4a5682722389a9b8e4a6fbbb7c5060d140c057e7 100644
--- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs
+++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs
@@ -19,16 +19,64 @@
 //! Tests of [`ChainSync`].
 
 use super::*;
-use futures::executor::block_on;
+use crate::{
+	block_relay_protocol::BlockResponseError, mock::MockBlockDownloader,
+	service::network::NetworkServiceProvider,
+};
+use futures::{channel::oneshot::Canceled, executor::block_on};
 use sc_block_builder::BlockBuilderBuilder;
+use sc_network::RequestFailure;
 use sc_network_common::sync::message::{BlockAnnounce, BlockData, BlockState, FromBlock};
 use sp_blockchain::HeaderBackend;
+use std::sync::Mutex;
 use substrate_test_runtime_client::{
 	runtime::{Block, Hash, Header},
 	BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient,
 	TestClientBuilder, TestClientBuilderExt,
 };
 
+#[derive(Debug)]
+struct ProxyBlockDownloader {
+	protocol_name: ProtocolName,
+	sender: std::sync::mpsc::Sender<BlockRequest<Block>>,
+	request: Mutex<std::sync::mpsc::Receiver<BlockRequest<Block>>>,
+}
+
+#[async_trait::async_trait]
+impl BlockDownloader<Block> for ProxyBlockDownloader {
+	fn protocol_name(&self) -> &ProtocolName {
+		&self.protocol_name
+	}
+
+	async fn download_blocks(
+		&self,
+		_who: PeerId,
+		request: BlockRequest<Block>,
+	) -> Result<Result<(Vec<u8>, ProtocolName), RequestFailure>, Canceled> {
+		self.sender.send(request).unwrap();
+		Ok(Ok((Vec::new(), self.protocol_name.clone())))
+	}
+
+	fn block_response_into_blocks(
+		&self,
+		_request: &BlockRequest<Block>,
+		_response: Vec<u8>,
+	) -> Result<Vec<BlockData<Block>>, BlockResponseError> {
+		Ok(Vec::new())
+	}
+}
+
+impl ProxyBlockDownloader {
+	fn new(protocol_name: ProtocolName) -> Self {
+		let (sender, receiver) = std::sync::mpsc::channel();
+		Self { protocol_name, sender, request: Mutex::new(receiver) }
+	}
+
+	fn next_request(&self) -> BlockRequest<Block> {
+		self.request.lock().unwrap().recv().unwrap()
+	}
+}
+
 #[test]
 fn processes_empty_response_on_justification_request_for_unknown_block() {
 	// if we ask for a justification for a given block to a peer that doesn't know that block
@@ -44,6 +92,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() {
 		1,
 		64,
 		ProtocolName::Static(""),
+		Arc::new(MockBlockDownloader::new()),
 		None,
 		std::iter::empty(),
 	)
@@ -108,6 +157,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 		1,
 		8,
 		ProtocolName::Static(""),
+		Arc::new(MockBlockDownloader::new()),
 		None,
 		std::iter::empty(),
 	)
@@ -140,13 +190,15 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 	sync.add_peer(peer_id1, Hash::random(), 42);
 	sync.add_peer(peer_id2, Hash::random(), 10);
 
+	let network_provider = NetworkServiceProvider::new();
+	let network_handle = network_provider.handle();
+
 	// we wil send block requests to these peers
 	// for these blocks we don't know about
-	let actions = sync.actions().unwrap();
+	let actions = sync.actions(&network_handle).unwrap();
 	assert_eq!(actions.len(), 2);
 	assert!(actions.iter().all(|action| match action {
-		SyncingAction::SendBlockRequest { peer_id, .. } =>
-			peer_id == &peer_id1 || peer_id == &peer_id2,
+		SyncingAction::StartRequest { peer_id, .. } => peer_id == &peer_id1 || peer_id == &peer_id2,
 		_ => false,
 	}));
 
@@ -176,7 +228,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 	sync.restart();
 
 	// which should make us cancel and send out again block requests to the first two peers
-	let actions = sync.actions().unwrap();
+	let actions = sync.actions(&network_handle).unwrap();
 	assert_eq!(actions.len(), 4);
 	let mut cancelled_first = HashSet::new();
 	assert!(actions.iter().all(|action| match action {
@@ -184,7 +236,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
 			cancelled_first.insert(peer_id);
 			peer_id == &peer_id1 || peer_id == &peer_id2
 		},
-		SyncingAction::SendBlockRequest { peer_id, .. } => {
+		SyncingAction::StartRequest { peer_id, .. } => {
 			assert!(cancelled_first.remove(peer_id));
 			peer_id == &peer_id1 || peer_id == &peer_id2
 		},
@@ -311,6 +363,7 @@ fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() {
 		5,
 		64,
 		ProtocolName::Static(""),
+		Arc::new(MockBlockDownloader::new()),
 		None,
 		std::iter::empty(),
 	)
@@ -459,12 +512,16 @@ fn can_sync_huge_fork() {
 
 	let info = client.info();
 
+	let protocol_name = ProtocolName::Static("");
+	let proxy_block_downloader = Arc::new(ProxyBlockDownloader::new(protocol_name.clone()));
+
 	let mut sync = ChainSync::new(
 		ChainSyncMode::Full,
 		client.clone(),
 		5,
 		64,
-		ProtocolName::Static(""),
+		protocol_name,
+		proxy_block_downloader.clone(),
 		None,
 		std::iter::empty(),
 	)
@@ -494,18 +551,21 @@ fn can_sync_huge_fork() {
 		let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1];
 		let response = create_block_response(vec![block.clone()]);
 
-		sync.on_block_data(&peer_id1, Some(request), response).unwrap();
+		sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap();
 
-		let actions = sync.take_actions().collect::<Vec<_>>();
+		let mut actions = sync.take_actions().collect::<Vec<_>>();
 
 		request = if actions.is_empty() {
 			// We found the ancestor
 			break
 		} else {
 			assert_eq!(actions.len(), 1);
-			match &actions[0] {
-				SyncingAction::SendBlockRequest { peer_id: _, request, key: _ } => request.clone(),
-				action @ _ => panic!("Unexpected action: {action:?}"),
+			match actions.pop().unwrap() {
+				SyncingAction::StartRequest { request, .. } => {
+					block_on(request).unwrap().unwrap();
+					proxy_block_downloader.next_request()
+				},
+				action => panic!("Unexpected action: {}", action.name()),
 			}
 		};
 
@@ -600,12 +660,16 @@ fn syncs_fork_without_duplicate_requests() {
 
 	let info = client.info();
 
+	let protocol_name = ProtocolName::Static("");
+	let proxy_block_downloader = Arc::new(ProxyBlockDownloader::new(protocol_name.clone()));
+
 	let mut sync = ChainSync::new(
 		ChainSyncMode::Full,
 		client.clone(),
 		5,
 		64,
-		ProtocolName::Static(""),
+		protocol_name,
+		proxy_block_downloader.clone(),
 		None,
 		std::iter::empty(),
 	)
@@ -637,16 +701,19 @@ fn syncs_fork_without_duplicate_requests() {
 
 		sync.on_block_data(&peer_id1, Some(request), response).unwrap();
 
-		let actions = sync.take_actions().collect::<Vec<_>>();
+		let mut actions = sync.take_actions().collect::<Vec<_>>();
 
 		request = if actions.is_empty() {
 			// We found the ancestor
 			break
 		} else {
 			assert_eq!(actions.len(), 1);
-			match &actions[0] {
-				SyncingAction::SendBlockRequest { peer_id: _, request, key: _ } => request.clone(),
-				action @ _ => panic!("Unexpected action: {action:?}"),
+			match actions.pop().unwrap() {
+				SyncingAction::StartRequest { request, .. } => {
+					block_on(request).unwrap().unwrap();
+					proxy_block_downloader.next_request()
+				},
+				action => panic!("Unexpected action: {}", action.name()),
 			}
 		};
 
@@ -750,6 +817,7 @@ fn removes_target_fork_on_disconnect() {
 		1,
 		64,
 		ProtocolName::Static(""),
+		Arc::new(MockBlockDownloader::new()),
 		None,
 		std::iter::empty(),
 	)
@@ -784,6 +852,7 @@ fn can_import_response_with_missing_blocks() {
 		1,
 		64,
 		ProtocolName::Static(""),
+		Arc::new(MockBlockDownloader::new()),
 		None,
 		std::iter::empty(),
 	)
@@ -824,6 +893,7 @@ fn sync_restart_removes_block_but_not_justification_requests() {
 		1,
 		64,
 		ProtocolName::Static(""),
+		Arc::new(MockBlockDownloader::new()),
 		None,
 		std::iter::empty(),
 	)
@@ -898,17 +968,17 @@ fn sync_restart_removes_block_but_not_justification_requests() {
 			SyncingAction::CancelRequest { peer_id, key: _ } => {
 				pending_responses.remove(&peer_id);
 			},
-			SyncingAction::SendBlockRequest { peer_id, .. } => {
+			SyncingAction::StartRequest { peer_id, .. } => {
 				// we drop obsolete response, but don't register a new request, it's checked in
 				// the `assert!` below
 				pending_responses.remove(&peer_id);
 			},
-			action @ _ => panic!("Unexpected action: {action:?}"),
+			action @ _ => panic!("Unexpected action: {}", action.name()),
 		}
 	}
 	assert!(actions.iter().any(|action| {
 		match action {
-			SyncingAction::SendBlockRequest { peer_id, .. } => peer_id == &peers[0],
+			SyncingAction::StartRequest { peer_id, .. } => peer_id == &peers[0],
 			_ => false,
 		}
 	}));
@@ -975,6 +1045,7 @@ fn request_across_forks() {
 		5,
 		64,
 		ProtocolName::Static(""),
+		Arc::new(MockBlockDownloader::new()),
 		None,
 		std::iter::empty(),
 	)
diff --git a/substrate/client/network/sync/src/strategy/polkadot.rs b/substrate/client/network/sync/src/strategy/polkadot.rs
new file mode 100644
index 0000000000000000000000000000000000000000..44b05966af064c48066c04d17a78a9e05ac3c208
--- /dev/null
+++ b/substrate/client/network/sync/src/strategy/polkadot.rs
@@ -0,0 +1,481 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! [`PolkadotSyncingStrategy`] is a proxy between [`crate::engine::SyncingEngine`]
+//! and specific syncing algorithms.
+
+use crate::{
+	block_relay_protocol::BlockDownloader,
+	block_request_handler::MAX_BLOCKS_IN_RESPONSE,
+	service::network::NetworkServiceHandle,
+	strategy::{
+		chain_sync::{ChainSync, ChainSyncMode},
+		state::StateStrategy,
+		warp::{WarpSync, WarpSyncConfig},
+		StrategyKey, SyncingAction, SyncingStrategy,
+	},
+	types::SyncStatus,
+	LOG_TARGET,
+};
+use log::{debug, error, info, warn};
+use prometheus_endpoint::Registry;
+use sc_client_api::{BlockBackend, ProofProvider};
+use sc_consensus::{BlockImportError, BlockImportStatus};
+use sc_network::ProtocolName;
+use sc_network_common::sync::{message::BlockAnnounce, SyncMode};
+use sc_network_types::PeerId;
+use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata};
+use sp_runtime::traits::{Block as BlockT, Header, NumberFor};
+use std::{any::Any, collections::HashMap, sync::Arc};
+
+/// Corresponding `ChainSync` mode.
+fn chain_sync_mode(sync_mode: SyncMode) -> ChainSyncMode {
+	match sync_mode {
+		SyncMode::Full => ChainSyncMode::Full,
+		SyncMode::LightState { skip_proofs, storage_chain_mode } =>
+			ChainSyncMode::LightState { skip_proofs, storage_chain_mode },
+		SyncMode::Warp => ChainSyncMode::Full,
+	}
+}
+
+/// Syncing configuration containing data for [`PolkadotSyncingStrategy`].
+#[derive(Clone, Debug)]
+pub struct PolkadotSyncingStrategyConfig<Block>
+where
+	Block: BlockT,
+{
+	/// Syncing mode.
+	pub mode: SyncMode,
+	/// The number of parallel downloads to guard against slow peers.
+	pub max_parallel_downloads: u32,
+	/// Maximum number of blocks to request.
+	pub max_blocks_per_request: u32,
+	/// Prometheus metrics registry.
+	pub metrics_registry: Option<Registry>,
+	/// Protocol name used to send out state requests
+	pub state_request_protocol_name: ProtocolName,
+	/// Block downloader
+	pub block_downloader: Arc<dyn BlockDownloader<Block>>,
+}
+
+/// Proxy to specific syncing strategies used in Polkadot.
+pub struct PolkadotSyncingStrategy<B: BlockT, Client> {
+	/// Initial syncing configuration.
+	config: PolkadotSyncingStrategyConfig<B>,
+	/// Client used by syncing strategies.
+	client: Arc<Client>,
+	/// Warp strategy.
+	warp: Option<WarpSync<B, Client>>,
+	/// State strategy.
+	state: Option<StateStrategy<B>>,
+	/// `ChainSync` strategy.`
+	chain_sync: Option<ChainSync<B, Client>>,
+	/// Connected peers and their best blocks used to seed a new strategy when switching to it in
+	/// `PolkadotSyncingStrategy::proceed_to_next`.
+	peer_best_blocks: HashMap<PeerId, (B::Hash, NumberFor<B>)>,
+}
+
+impl<B: BlockT, Client> SyncingStrategy<B> for PolkadotSyncingStrategy<B, Client>
+where
+	B: BlockT,
+	Client: HeaderBackend<B>
+		+ BlockBackend<B>
+		+ HeaderMetadata<B, Error = sp_blockchain::Error>
+		+ ProofProvider<B>
+		+ Send
+		+ Sync
+		+ 'static,
+{
+	fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor<B>) {
+		self.peer_best_blocks.insert(peer_id, (best_hash, best_number));
+
+		self.warp.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number));
+		self.state.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number));
+		self.chain_sync.as_mut().map(|s| s.add_peer(peer_id, best_hash, best_number));
+	}
+
+	fn remove_peer(&mut self, peer_id: &PeerId) {
+		self.warp.as_mut().map(|s| s.remove_peer(peer_id));
+		self.state.as_mut().map(|s| s.remove_peer(peer_id));
+		self.chain_sync.as_mut().map(|s| s.remove_peer(peer_id));
+
+		self.peer_best_blocks.remove(peer_id);
+	}
+
+	fn on_validated_block_announce(
+		&mut self,
+		is_best: bool,
+		peer_id: PeerId,
+		announce: &BlockAnnounce<B::Header>,
+	) -> Option<(B::Hash, NumberFor<B>)> {
+		let new_best = if let Some(ref mut warp) = self.warp {
+			warp.on_validated_block_announce(is_best, peer_id, announce)
+		} else if let Some(ref mut state) = self.state {
+			state.on_validated_block_announce(is_best, peer_id, announce)
+		} else if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.on_validated_block_announce(is_best, peer_id, announce)
+		} else {
+			error!(target: LOG_TARGET, "No syncing strategy is active.");
+			debug_assert!(false);
+			Some((announce.header.hash(), *announce.header.number()))
+		};
+
+		if let Some(new_best) = new_best {
+			if let Some(best) = self.peer_best_blocks.get_mut(&peer_id) {
+				*best = new_best;
+			} else {
+				debug!(
+					target: LOG_TARGET,
+					"Cannot update `peer_best_blocks` as peer {peer_id} is not known to `Strategy` \
+					 (already disconnected?)",
+				);
+			}
+		}
+
+		new_best
+	}
+
+	fn set_sync_fork_request(&mut self, peers: Vec<PeerId>, hash: &B::Hash, number: NumberFor<B>) {
+		// Fork requests are only handled by `ChainSync`.
+		if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.set_sync_fork_request(peers.clone(), hash, number);
+		}
+	}
+
+	fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
+		// Justifications can only be requested via `ChainSync`.
+		if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.request_justification(hash, number);
+		}
+	}
+
+	fn clear_justification_requests(&mut self) {
+		// Justification requests can only be cleared by `ChainSync`.
+		if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.clear_justification_requests();
+		}
+	}
+
+	fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor<B>, success: bool) {
+		// Only `ChainSync` is interested in justification import.
+		if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.on_justification_import(hash, number, success);
+		}
+	}
+
+	fn on_generic_response(
+		&mut self,
+		peer_id: &PeerId,
+		key: StrategyKey,
+		protocol_name: ProtocolName,
+		response: Box<dyn Any + Send>,
+	) {
+		match key {
+			StateStrategy::<B>::STRATEGY_KEY =>
+				if let Some(state) = &mut self.state {
+					let Ok(response) = response.downcast::<Vec<u8>>() else {
+						warn!(target: LOG_TARGET, "Failed to downcast state response");
+						debug_assert!(false);
+						return;
+					};
+
+					state.on_state_response(peer_id, *response);
+				} else if let Some(chain_sync) = &mut self.chain_sync {
+					chain_sync.on_generic_response(peer_id, key, protocol_name, response);
+				} else {
+					error!(
+						target: LOG_TARGET,
+						"`on_generic_response()` called with unexpected key {key:?} \
+						 or corresponding strategy is not active.",
+					);
+					debug_assert!(false);
+				},
+			WarpSync::<B, Client>::STRATEGY_KEY =>
+				if let Some(warp) = &mut self.warp {
+					warp.on_generic_response(peer_id, protocol_name, response);
+				} else {
+					error!(
+						target: LOG_TARGET,
+						"`on_generic_response()` called with unexpected key {key:?} \
+						 or warp strategy is not active",
+					);
+					debug_assert!(false);
+				},
+			ChainSync::<B, Client>::STRATEGY_KEY =>
+				if let Some(chain_sync) = &mut self.chain_sync {
+					chain_sync.on_generic_response(peer_id, key, protocol_name, response);
+				} else {
+					error!(
+						target: LOG_TARGET,
+						"`on_generic_response()` called with unexpected key {key:?} \
+						 or corresponding strategy is not active.",
+					);
+					debug_assert!(false);
+				},
+			key => {
+				warn!(
+					target: LOG_TARGET,
+					"Unexpected generic response strategy key {key:?}, protocol {protocol_name}",
+				);
+				debug_assert!(false);
+			},
+		}
+	}
+
+	fn on_blocks_processed(
+		&mut self,
+		imported: usize,
+		count: usize,
+		results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
+	) {
+		// Only `StateStrategy` and `ChainSync` are interested in block processing notifications.
+		if let Some(ref mut state) = self.state {
+			state.on_blocks_processed(imported, count, results);
+		} else if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.on_blocks_processed(imported, count, results);
+		}
+	}
+
+	fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor<B>) {
+		// Only `ChainSync` is interested in block finalization notifications.
+		if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.on_block_finalized(hash, number);
+		}
+	}
+
+	fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor<B>) {
+		// This is relevant to `ChainSync` only.
+		if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.update_chain_info(best_hash, best_number);
+		}
+	}
+
+	fn is_major_syncing(&self) -> bool {
+		self.warp.is_some() ||
+			self.state.is_some() ||
+			match self.chain_sync {
+				Some(ref s) => s.status().state.is_major_syncing(),
+				None => unreachable!("At least one syncing strategy is active; qed"),
+			}
+	}
+
+	fn num_peers(&self) -> usize {
+		self.peer_best_blocks.len()
+	}
+
+	fn status(&self) -> SyncStatus<B> {
+		// This function presumes that strategies are executed serially and must be refactored
+		// once we have parallel strategies.
+		if let Some(ref warp) = self.warp {
+			warp.status()
+		} else if let Some(ref state) = self.state {
+			state.status()
+		} else if let Some(ref chain_sync) = self.chain_sync {
+			chain_sync.status()
+		} else {
+			unreachable!("At least one syncing strategy is always active; qed")
+		}
+	}
+
+	fn num_downloaded_blocks(&self) -> usize {
+		self.chain_sync
+			.as_ref()
+			.map_or(0, |chain_sync| chain_sync.num_downloaded_blocks())
+	}
+
+	fn num_sync_requests(&self) -> usize {
+		self.chain_sync.as_ref().map_or(0, |chain_sync| chain_sync.num_sync_requests())
+	}
+
+	fn actions(
+		&mut self,
+		network_service: &NetworkServiceHandle,
+	) -> Result<Vec<SyncingAction<B>>, ClientError> {
+		// This function presumes that strategies are executed serially and must be refactored once
+		// we have parallel strategies.
+		let actions: Vec<_> = if let Some(ref mut warp) = self.warp {
+			warp.actions(network_service).map(Into::into).collect()
+		} else if let Some(ref mut state) = self.state {
+			state.actions(network_service).map(Into::into).collect()
+		} else if let Some(ref mut chain_sync) = self.chain_sync {
+			chain_sync.actions(network_service)?
+		} else {
+			unreachable!("At least one syncing strategy is always active; qed")
+		};
+
+		if actions.iter().any(SyncingAction::is_finished) {
+			self.proceed_to_next()?;
+		}
+
+		Ok(actions)
+	}
+}
+
+impl<B: BlockT, Client> PolkadotSyncingStrategy<B, Client>
+where
+	B: BlockT,
+	Client: HeaderBackend<B>
+		+ BlockBackend<B>
+		+ HeaderMetadata<B, Error = sp_blockchain::Error>
+		+ ProofProvider<B>
+		+ Send
+		+ Sync
+		+ 'static,
+{
+	/// Initialize a new syncing strategy.
+	pub fn new(
+		mut config: PolkadotSyncingStrategyConfig<B>,
+		client: Arc<Client>,
+		warp_sync_config: Option<WarpSyncConfig<B>>,
+		warp_sync_protocol_name: Option<ProtocolName>,
+	) -> Result<Self, ClientError> {
+		if config.max_blocks_per_request > MAX_BLOCKS_IN_RESPONSE as u32 {
+			info!(
+				target: LOG_TARGET,
+				"clamping maximum blocks per request to {MAX_BLOCKS_IN_RESPONSE}",
+			);
+			config.max_blocks_per_request = MAX_BLOCKS_IN_RESPONSE as u32;
+		}
+
+		if let SyncMode::Warp = config.mode {
+			let warp_sync_config = warp_sync_config
+				.expect("Warp sync configuration must be supplied in warp sync mode.");
+			let warp_sync = WarpSync::new(
+				client.clone(),
+				warp_sync_config,
+				warp_sync_protocol_name,
+				config.block_downloader.clone(),
+			);
+			Ok(Self {
+				config,
+				client,
+				warp: Some(warp_sync),
+				state: None,
+				chain_sync: None,
+				peer_best_blocks: Default::default(),
+			})
+		} else {
+			let chain_sync = ChainSync::new(
+				chain_sync_mode(config.mode),
+				client.clone(),
+				config.max_parallel_downloads,
+				config.max_blocks_per_request,
+				config.state_request_protocol_name.clone(),
+				config.block_downloader.clone(),
+				config.metrics_registry.as_ref(),
+				std::iter::empty(),
+			)?;
+			Ok(Self {
+				config,
+				client,
+				warp: None,
+				state: None,
+				chain_sync: Some(chain_sync),
+				peer_best_blocks: Default::default(),
+			})
+		}
+	}
+
+	/// Proceed with the next strategy if the active one finished.
+	pub fn proceed_to_next(&mut self) -> Result<(), ClientError> {
+		// The strategies are switched as `WarpSync` -> `StateStrategy` -> `ChainSync`.
+		if let Some(ref mut warp) = self.warp {
+			match warp.take_result() {
+				Some(res) => {
+					info!(
+						target: LOG_TARGET,
+						"Warp sync is complete, continuing with state sync."
+					);
+					let state_sync = StateStrategy::new(
+						self.client.clone(),
+						res.target_header,
+						res.target_body,
+						res.target_justifications,
+						false,
+						self.peer_best_blocks
+							.iter()
+							.map(|(peer_id, (_, best_number))| (*peer_id, *best_number)),
+						self.config.state_request_protocol_name.clone(),
+					);
+
+					self.warp = None;
+					self.state = Some(state_sync);
+					Ok(())
+				},
+				None => {
+					error!(
+						target: LOG_TARGET,
+						"Warp sync failed. Continuing with full sync."
+					);
+					let chain_sync = match ChainSync::new(
+						chain_sync_mode(self.config.mode),
+						self.client.clone(),
+						self.config.max_parallel_downloads,
+						self.config.max_blocks_per_request,
+						self.config.state_request_protocol_name.clone(),
+						self.config.block_downloader.clone(),
+						self.config.metrics_registry.as_ref(),
+						self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| {
+							(*peer_id, *best_hash, *best_number)
+						}),
+					) {
+						Ok(chain_sync) => chain_sync,
+						Err(e) => {
+							error!(target: LOG_TARGET, "Failed to start `ChainSync`.");
+							return Err(e)
+						},
+					};
+
+					self.warp = None;
+					self.chain_sync = Some(chain_sync);
+					Ok(())
+				},
+			}
+		} else if let Some(state) = &self.state {
+			if state.is_succeeded() {
+				info!(target: LOG_TARGET, "State sync is complete, continuing with block sync.");
+			} else {
+				error!(target: LOG_TARGET, "State sync failed. Falling back to full sync.");
+			}
+			let chain_sync = match ChainSync::new(
+				chain_sync_mode(self.config.mode),
+				self.client.clone(),
+				self.config.max_parallel_downloads,
+				self.config.max_blocks_per_request,
+				self.config.state_request_protocol_name.clone(),
+				self.config.block_downloader.clone(),
+				self.config.metrics_registry.as_ref(),
+				self.peer_best_blocks.iter().map(|(peer_id, (best_hash, best_number))| {
+					(*peer_id, *best_hash, *best_number)
+				}),
+			) {
+				Ok(chain_sync) => chain_sync,
+				Err(e) => {
+					error!(target: LOG_TARGET, "Failed to start `ChainSync`.");
+					return Err(e);
+				},
+			};
+
+			self.state = None;
+			self.chain_sync = Some(chain_sync);
+			Ok(())
+		} else {
+			unreachable!("Only warp & state strategies can finish; qed")
+		}
+	}
+}
diff --git a/substrate/client/network/sync/src/strategy/state.rs b/substrate/client/network/sync/src/strategy/state.rs
index d69ab3e2d535e9605d227316b0faa02b87952d46..93125fe8f66ac8de21032891292c163b03cfa79c 100644
--- a/substrate/client/network/sync/src/strategy/state.rs
+++ b/substrate/client/network/sync/src/strategy/state.rs
@@ -19,18 +19,22 @@
 //! State sync strategy.
 
 use crate::{
-	schema::v1::StateResponse,
+	schema::v1::{StateRequest, StateResponse},
+	service::network::NetworkServiceHandle,
 	strategy::{
 		disconnected_peers::DisconnectedPeers,
 		state_sync::{ImportResult, StateSync, StateSyncProvider},
+		StrategyKey, SyncingAction,
 	},
-	types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus},
+	types::{BadPeer, SyncState, SyncStatus},
 	LOG_TARGET,
 };
+use futures::{channel::oneshot, FutureExt};
 use log::{debug, error, trace};
+use prost::Message;
 use sc_client_api::ProofProvider;
 use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
-use sc_network::ProtocolName;
+use sc_network::{IfDisconnected, ProtocolName};
 use sc_network_common::sync::message::BlockAnnounce;
 use sc_network_types::PeerId;
 use sp_consensus::BlockOrigin;
@@ -38,7 +42,7 @@ use sp_runtime::{
 	traits::{Block as BlockT, Header, NumberFor},
 	Justifications, SaturatedConversion,
 };
-use std::{collections::HashMap, sync::Arc};
+use std::{any::Any, collections::HashMap, sync::Arc};
 
 mod rep {
 	use sc_network::ReputationChange as Rep;
@@ -50,18 +54,6 @@ mod rep {
 	pub const BAD_STATE: Rep = Rep::new(-(1 << 29), "Bad state");
 }
 
-/// Action that should be performed on [`StateStrategy`]'s behalf.
-pub enum StateStrategyAction<B: BlockT> {
-	/// Send state request to peer.
-	SendStateRequest { peer_id: PeerId, protocol_name: ProtocolName, request: OpaqueStateRequest },
-	/// Disconnect and report peer.
-	DropPeer(BadPeer),
-	/// Import blocks.
-	ImportBlocks { origin: BlockOrigin, blocks: Vec<IncomingBlock<B>> },
-	/// State sync has finished.
-	Finished,
-}
-
 enum PeerState {
 	Available,
 	DownloadingState,
@@ -83,12 +75,15 @@ pub struct StateStrategy<B: BlockT> {
 	state_sync: Box<dyn StateSyncProvider<B>>,
 	peers: HashMap<PeerId, Peer<B>>,
 	disconnected_peers: DisconnectedPeers,
-	actions: Vec<StateStrategyAction<B>>,
+	actions: Vec<SyncingAction<B>>,
 	protocol_name: ProtocolName,
 	succeeded: bool,
 }
 
 impl<B: BlockT> StateStrategy<B> {
+	/// Strategy key used by state sync.
+	pub const STRATEGY_KEY: StrategyKey = StrategyKey::new("State");
+
 	/// Create a new instance.
 	pub fn new<Client>(
 		client: Arc<Client>,
@@ -157,7 +152,7 @@ impl<B: BlockT> StateStrategy<B> {
 				if let Some(bad_peer) =
 					self.disconnected_peers.on_disconnect_during_request(*peer_id)
 				{
-					self.actions.push(StateStrategyAction::DropPeer(bad_peer));
+					self.actions.push(SyncingAction::DropPeer(bad_peer));
 				}
 			}
 		}
@@ -185,30 +180,32 @@ impl<B: BlockT> StateStrategy<B> {
 	}
 
 	/// Process state response.
-	pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) {
-		if let Err(bad_peer) = self.on_state_response_inner(peer_id, response) {
-			self.actions.push(StateStrategyAction::DropPeer(bad_peer));
+	pub fn on_state_response(&mut self, peer_id: &PeerId, response: Vec<u8>) {
+		if let Err(bad_peer) = self.on_state_response_inner(peer_id, &response) {
+			self.actions.push(SyncingAction::DropPeer(bad_peer));
 		}
 	}
 
 	fn on_state_response_inner(
 		&mut self,
-		peer_id: PeerId,
-		response: OpaqueStateResponse,
+		peer_id: &PeerId,
+		response: &[u8],
 	) -> Result<(), BadPeer> {
 		if let Some(peer) = self.peers.get_mut(&peer_id) {
 			peer.state = PeerState::Available;
 		}
 
-		let response: Box<StateResponse> = response.0.downcast().map_err(|_error| {
-			error!(
-				target: LOG_TARGET,
-				"Failed to downcast opaque state response, this is an implementation bug."
-			);
-			debug_assert!(false);
+		let response = match StateResponse::decode(response) {
+			Ok(response) => response,
+			Err(error) => {
+				debug!(
+					target: LOG_TARGET,
+					"Failed to decode state response from peer {peer_id:?}: {error:?}.",
+				);
 
-			BadPeer(peer_id, rep::BAD_RESPONSE)
-		})?;
+				return Err(BadPeer(*peer_id, rep::BAD_RESPONSE));
+			},
+		};
 
 		debug!(
 			target: LOG_TARGET,
@@ -218,7 +215,7 @@ impl<B: BlockT> StateStrategy<B> {
 			response.proof.len(),
 		);
 
-		match self.state_sync.import(*response) {
+		match self.state_sync.import(response) {
 			ImportResult::Import(hash, header, state, body, justifications) => {
 				let origin = BlockOrigin::NetworkInitialSync;
 				let block = IncomingBlock {
@@ -234,14 +231,13 @@ impl<B: BlockT> StateStrategy<B> {
 					state: Some(state),
 				};
 				debug!(target: LOG_TARGET, "State download is complete. Import is queued");
-				self.actions
-					.push(StateStrategyAction::ImportBlocks { origin, blocks: vec![block] });
+				self.actions.push(SyncingAction::ImportBlocks { origin, blocks: vec![block] });
 				Ok(())
 			},
 			ImportResult::Continue => Ok(()),
 			ImportResult::BadResponse => {
 				debug!(target: LOG_TARGET, "Bad state data received from {peer_id}");
-				Err(BadPeer(peer_id, rep::BAD_STATE))
+				Err(BadPeer(*peer_id, rep::BAD_STATE))
 			},
 		}
 	}
@@ -281,12 +277,12 @@ impl<B: BlockT> StateStrategy<B> {
 				);
 			});
 			self.succeeded |= results.into_iter().any(|result| result.is_ok());
-			self.actions.push(StateStrategyAction::Finished);
+			self.actions.push(SyncingAction::Finished);
 		}
 	}
 
 	/// Produce state request.
-	fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> {
+	fn state_request(&mut self) -> Option<(PeerId, StateRequest)> {
 		if self.state_sync.is_complete() {
 			return None
 		}
@@ -307,7 +303,7 @@ impl<B: BlockT> StateStrategy<B> {
 			target: LOG_TARGET,
 			"New state request to {peer_id}: {request:?}.",
 		);
-		Some((peer_id, OpaqueStateRequest(Box::new(request))))
+		Some((peer_id, request))
 	}
 
 	fn schedule_next_peer(
@@ -354,12 +350,31 @@ impl<B: BlockT> StateStrategy<B> {
 
 	/// Get actions that should be performed by the owner on [`WarpSync`]'s behalf
 	#[must_use]
-	pub fn actions(&mut self) -> impl Iterator<Item = StateStrategyAction<B>> {
+	pub fn actions(
+		&mut self,
+		network_service: &NetworkServiceHandle,
+	) -> impl Iterator<Item = SyncingAction<B>> {
 		let state_request = self.state_request().into_iter().map(|(peer_id, request)| {
-			StateStrategyAction::SendStateRequest {
+			let (tx, rx) = oneshot::channel();
+
+			network_service.start_request(
+				peer_id,
+				self.protocol_name.clone(),
+				request.encode_to_vec(),
+				tx,
+				IfDisconnected::ImmediateError,
+			);
+
+			SyncingAction::StartRequest {
 				peer_id,
-				protocol_name: self.protocol_name.clone(),
-				request,
+				key: Self::STRATEGY_KEY,
+				request: async move {
+					Ok(rx.await?.and_then(|(response, protocol_name)| {
+						Ok((Box::new(response) as Box<dyn Any + Send>, protocol_name))
+					}))
+				}
+				.boxed(),
+				remove_obsolete: false,
 			}
 		});
 		self.actions.extend(state_request);
@@ -379,6 +394,7 @@ mod test {
 	use super::*;
 	use crate::{
 		schema::v1::{StateRequest, StateResponse},
+		service::network::NetworkServiceProvider,
 		strategy::state_sync::{ImportResult, StateSyncProgress, StateSyncProvider},
 	};
 	use codec::Decode;
@@ -579,8 +595,7 @@ mod test {
 			ProtocolName::Static(""),
 		);
 
-		let (_peer_id, mut opaque_request) = state_strategy.state_request().unwrap();
-		let request: &mut StateRequest = opaque_request.0.downcast_mut().unwrap();
+		let (_peer_id, request) = state_strategy.state_request().unwrap();
 		let hash = Hash::decode(&mut &*request.block).unwrap();
 
 		assert_eq!(hash, target_block.header().hash());
@@ -631,8 +646,8 @@ mod test {
 		// Manually set the peer's state.
 		state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState;
 
-		let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default()));
-		state_strategy.on_state_response(peer_id, dummy_response);
+		let dummy_response = StateResponse::default().encode_to_vec();
+		state_strategy.on_state_response(&peer_id, dummy_response);
 
 		assert!(state_strategy.peers.get(&peer_id).unwrap().state.is_available());
 	}
@@ -651,10 +666,10 @@ mod test {
 		);
 		// Manually set the peer's state.
 		state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState;
-		let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default()));
+		let dummy_response = StateResponse::default().encode_to_vec();
 		// Receiving response drops the peer.
 		assert!(matches!(
-			state_strategy.on_state_response_inner(peer_id, dummy_response),
+			state_strategy.on_state_response_inner(&peer_id, &dummy_response),
 			Err(BadPeer(id, _rep)) if id == peer_id,
 		));
 	}
@@ -674,8 +689,8 @@ mod test {
 		// Manually set the peer's state .
 		state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState;
 
-		let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default()));
-		state_strategy.on_state_response(peer_id, dummy_response);
+		let dummy_response = StateResponse::default().encode_to_vec();
+		state_strategy.on_state_response(&peer_id, dummy_response);
 
 		// No actions generated.
 		assert_eq!(state_strategy.actions.len(), 0)
@@ -737,13 +752,13 @@ mod test {
 		state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState;
 
 		// Receive response.
-		let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default()));
-		state_strategy.on_state_response(peer_id, dummy_response);
+		let dummy_response = StateResponse::default().encode_to_vec();
+		state_strategy.on_state_response(&peer_id, dummy_response);
 
 		assert_eq!(state_strategy.actions.len(), 1);
 		assert!(matches!(
 			&state_strategy.actions[0],
-			StateStrategyAction::ImportBlocks { origin, blocks }
+			SyncingAction::ImportBlocks { origin, blocks }
 				if *origin == expected_origin && *blocks == expected_blocks,
 		));
 	}
@@ -799,7 +814,7 @@ mod test {
 
 		// Strategy finishes.
 		assert_eq!(state_strategy.actions.len(), 1);
-		assert!(matches!(&state_strategy.actions[0], StateStrategyAction::Finished));
+		assert!(matches!(&state_strategy.actions[0], SyncingAction::Finished));
 	}
 
 	#[test]
@@ -826,7 +841,7 @@ mod test {
 
 		// Strategy finishes.
 		assert_eq!(state_strategy.actions.len(), 1);
-		assert!(matches!(&state_strategy.actions[0], StateStrategyAction::Finished));
+		assert!(matches!(&state_strategy.actions[0], SyncingAction::Finished));
 	}
 
 	#[test]
@@ -854,12 +869,15 @@ mod test {
 			)],
 		);
 
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
+
 		// Strategy finishes.
-		let actions = state_strategy.actions().collect::<Vec<_>>();
+		let actions = state_strategy.actions(&network_handle).collect::<Vec<_>>();
 		assert_eq!(actions.len(), 1);
-		assert!(matches!(&actions[0], StateStrategyAction::Finished));
+		assert!(matches!(&actions[0], SyncingAction::Finished));
 
 		// No more actions generated.
-		assert_eq!(state_strategy.actions().count(), 0);
+		assert_eq!(state_strategy.actions(&network_handle).count(), 0);
 	}
 }
diff --git a/substrate/client/network/sync/src/strategy/warp.rs b/substrate/client/network/sync/src/strategy/warp.rs
index 0c71dd3c6aeeea59470f30b4dbe2176541d81285..673bc1688ecc680c3ac62f5db0db53652c551bb7 100644
--- a/substrate/client/network/sync/src/strategy/warp.rs
+++ b/substrate/client/network/sync/src/strategy/warp.rs
@@ -21,13 +21,19 @@
 pub use sp_consensus_grandpa::{AuthorityList, SetId};
 
 use crate::{
-	strategy::{chain_sync::validate_blocks, disconnected_peers::DisconnectedPeers},
+	block_relay_protocol::{BlockDownloader, BlockResponseError},
+	service::network::NetworkServiceHandle,
+	strategy::{
+		chain_sync::validate_blocks, disconnected_peers::DisconnectedPeers, StrategyKey,
+		SyncingAction,
+	},
 	types::{BadPeer, SyncState, SyncStatus},
 	LOG_TARGET,
 };
 use codec::{Decode, Encode};
+use futures::{channel::oneshot, FutureExt};
 use log::{debug, error, trace, warn};
-use sc_network::ProtocolName;
+use sc_network::{IfDisconnected, ProtocolName};
 use sc_network_common::sync::message::{
 	BlockAnnounce, BlockAttributes, BlockData, BlockRequest, Direction, FromBlock,
 };
@@ -37,7 +43,7 @@ use sp_runtime::{
 	traits::{Block as BlockT, Header, NumberFor, Zero},
 	Justifications, SaturatedConversion,
 };
-use std::{collections::HashMap, fmt, sync::Arc};
+use std::{any::Any, collections::HashMap, fmt, sync::Arc};
 
 /// Number of peers that need to be connected before warp sync is started.
 const MIN_PEERS_TO_START_WARP_SYNC: usize = 3;
@@ -97,6 +103,9 @@ mod rep {
 
 	/// Reputation change for peers which send us a block which we fail to verify.
 	pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed");
+
+	/// We received a message that failed to decode.
+	pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message");
 }
 
 /// Reported warp sync phase.
@@ -186,22 +195,6 @@ struct Peer<B: BlockT> {
 	state: PeerState,
 }
 
-/// Action that should be performed on [`WarpSync`]'s behalf.
-pub enum WarpSyncAction<B: BlockT> {
-	/// Send warp proof request to peer.
-	SendWarpProofRequest {
-		peer_id: PeerId,
-		protocol_name: ProtocolName,
-		request: WarpProofRequest<B>,
-	},
-	/// Send block request to peer. Always implies dropping a stale block request to the same peer.
-	SendBlockRequest { peer_id: PeerId, request: BlockRequest<B> },
-	/// Disconnect and report peer.
-	DropPeer(BadPeer),
-	/// Warp sync has finished.
-	Finished,
-}
-
 pub struct WarpSyncResult<B: BlockT> {
 	pub target_header: B::Header,
 	pub target_body: Option<Vec<B::Extrinsic>>,
@@ -217,7 +210,8 @@ pub struct WarpSync<B: BlockT, Client> {
 	peers: HashMap<PeerId, Peer<B>>,
 	disconnected_peers: DisconnectedPeers,
 	protocol_name: Option<ProtocolName>,
-	actions: Vec<WarpSyncAction<B>>,
+	block_downloader: Arc<dyn BlockDownloader<B>>,
+	actions: Vec<SyncingAction<B>>,
 	result: Option<WarpSyncResult<B>>,
 }
 
@@ -226,6 +220,9 @@ where
 	B: BlockT,
 	Client: HeaderBackend<B> + 'static,
 {
+	/// Strategy key used by warp sync.
+	pub const STRATEGY_KEY: StrategyKey = StrategyKey::new("Warp");
+
 	/// Create a new instance. When passing a warp sync provider we will be checking for proof and
 	/// authorities. Alternatively we can pass a target block when we want to skip downloading
 	/// proofs, in this case we will continue polling until the target block is known.
@@ -233,6 +230,7 @@ where
 		client: Arc<Client>,
 		warp_sync_config: WarpSyncConfig<B>,
 		protocol_name: Option<ProtocolName>,
+		block_downloader: Arc<dyn BlockDownloader<B>>,
 	) -> Self {
 		if client.info().finalized_state.is_some() {
 			error!(
@@ -247,7 +245,8 @@ where
 				peers: HashMap::new(),
 				disconnected_peers: DisconnectedPeers::new(),
 				protocol_name,
-				actions: vec![WarpSyncAction::Finished],
+				block_downloader,
+				actions: vec![SyncingAction::Finished],
 				result: None,
 			}
 		}
@@ -266,6 +265,7 @@ where
 			peers: HashMap::new(),
 			disconnected_peers: DisconnectedPeers::new(),
 			protocol_name,
+			block_downloader,
 			actions: Vec::new(),
 			result: None,
 		}
@@ -285,7 +285,7 @@ where
 				if let Some(bad_peer) =
 					self.disconnected_peers.on_disconnect_during_request(*peer_id)
 				{
-					self.actions.push(WarpSyncAction::DropPeer(bad_peer));
+					self.actions.push(SyncingAction::DropPeer(bad_peer));
 				}
 			}
 		}
@@ -329,6 +329,58 @@ where
 		trace!(target: LOG_TARGET, "Started warp sync with {} peers.", self.peers.len());
 	}
 
+	pub fn on_generic_response(
+		&mut self,
+		peer_id: &PeerId,
+		protocol_name: ProtocolName,
+		response: Box<dyn Any + Send>,
+	) {
+		if &protocol_name == self.block_downloader.protocol_name() {
+			let Ok(response) = response
+				.downcast::<(BlockRequest<B>, Result<Vec<BlockData<B>>, BlockResponseError>)>()
+			else {
+				warn!(target: LOG_TARGET, "Failed to downcast block response");
+				debug_assert!(false);
+				return;
+			};
+
+			let (request, response) = *response;
+			let blocks = match response {
+				Ok(blocks) => blocks,
+				Err(BlockResponseError::DecodeFailed(e)) => {
+					debug!(
+						target: LOG_TARGET,
+						"Failed to decode block response from peer {:?}: {:?}.",
+						peer_id,
+						e
+					);
+					self.actions.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_MESSAGE)));
+					return;
+				},
+				Err(BlockResponseError::ExtractionFailed(e)) => {
+					debug!(
+						target: LOG_TARGET,
+						"Failed to extract blocks from peer response {:?}: {:?}.",
+						peer_id,
+						e
+					);
+					self.actions.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_MESSAGE)));
+					return;
+				},
+			};
+
+			self.on_block_response(*peer_id, request, blocks);
+		} else {
+			let Ok(response) = response.downcast::<Vec<u8>>() else {
+				warn!(target: LOG_TARGET, "Failed to downcast warp sync response");
+				debug_assert!(false);
+				return;
+			};
+
+			self.on_warp_proof_response(peer_id, EncodedProof(*response));
+		}
+	}
+
 	/// Process warp proof response.
 	pub fn on_warp_proof_response(&mut self, peer_id: &PeerId, response: EncodedProof) {
 		if let Some(peer) = self.peers.get_mut(peer_id) {
@@ -340,7 +392,7 @@ where
 		else {
 			debug!(target: LOG_TARGET, "Unexpected warp proof response");
 			self.actions
-				.push(WarpSyncAction::DropPeer(BadPeer(*peer_id, rep::UNEXPECTED_RESPONSE)));
+				.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::UNEXPECTED_RESPONSE)));
 			return
 		};
 
@@ -348,7 +400,7 @@ where
 			Err(e) => {
 				debug!(target: LOG_TARGET, "Bad warp proof response: {}", e);
 				self.actions
-					.push(WarpSyncAction::DropPeer(BadPeer(*peer_id, rep::BAD_WARP_PROOF)))
+					.push(SyncingAction::DropPeer(BadPeer(*peer_id, rep::BAD_WARP_PROOF)))
 			},
 			Ok(VerificationResult::Partial(new_set_id, new_authorities, new_last_hash)) => {
 				log::debug!(target: LOG_TARGET, "Verified partial proof, set_id={:?}", new_set_id);
@@ -379,7 +431,7 @@ where
 		blocks: Vec<BlockData<B>>,
 	) {
 		if let Err(bad_peer) = self.on_block_response_inner(peer_id, request, blocks) {
-			self.actions.push(WarpSyncAction::DropPeer(bad_peer));
+			self.actions.push(SyncingAction::DropPeer(bad_peer));
 		}
 	}
 
@@ -449,7 +501,7 @@ where
 			target_justifications: block.justifications,
 		});
 		self.phase = Phase::Complete;
-		self.actions.push(WarpSyncAction::Finished);
+		self.actions.push(SyncingAction::Finished);
 		Ok(())
 	}
 
@@ -606,17 +658,67 @@ where
 
 	/// Get actions that should be performed by the owner on [`WarpSync`]'s behalf
 	#[must_use]
-	pub fn actions(&mut self) -> impl Iterator<Item = WarpSyncAction<B>> {
+	pub fn actions(
+		&mut self,
+		network_service: &NetworkServiceHandle,
+	) -> impl Iterator<Item = SyncingAction<B>> {
 		let warp_proof_request =
 			self.warp_proof_request().into_iter().map(|(peer_id, protocol_name, request)| {
-				WarpSyncAction::SendWarpProofRequest { peer_id, protocol_name, request }
+				trace!(
+					target: LOG_TARGET,
+					"Created `WarpProofRequest` to {}, request: {:?}.",
+					peer_id,
+					request,
+				);
+
+				let (tx, rx) = oneshot::channel();
+
+				network_service.start_request(
+					peer_id,
+					protocol_name,
+					request.encode(),
+					tx,
+					IfDisconnected::ImmediateError,
+				);
+
+				SyncingAction::StartRequest {
+					peer_id,
+					key: Self::STRATEGY_KEY,
+					request: async move {
+						Ok(rx.await?.and_then(|(response, protocol_name)| {
+							Ok((Box::new(response) as Box<dyn Any + Send>, protocol_name))
+						}))
+					}
+					.boxed(),
+					remove_obsolete: false,
+				}
 			});
 		self.actions.extend(warp_proof_request);
 
-		let target_block_request = self
-			.target_block_request()
-			.into_iter()
-			.map(|(peer_id, request)| WarpSyncAction::SendBlockRequest { peer_id, request });
+		let target_block_request =
+			self.target_block_request().into_iter().map(|(peer_id, request)| {
+				let downloader = self.block_downloader.clone();
+
+				SyncingAction::StartRequest {
+					peer_id,
+					key: Self::STRATEGY_KEY,
+					request: async move {
+						Ok(downloader.download_blocks(peer_id, request.clone()).await?.and_then(
+							|(response, protocol_name)| {
+								let decoded_response =
+									downloader.block_response_into_blocks(&request, response);
+								let result =
+									Box::new((request, decoded_response)) as Box<dyn Any + Send>;
+								Ok((result, protocol_name))
+							},
+						))
+					}
+					.boxed(),
+					// Sending block request implies dropping obsolete pending response as we are
+					// not interested in it anymore.
+					remove_obsolete: true,
+				}
+			});
 		self.actions.extend(target_block_request);
 
 		std::mem::take(&mut self.actions).into_iter()
@@ -632,6 +734,7 @@ where
 #[cfg(test)]
 mod test {
 	use super::*;
+	use crate::{mock::MockBlockDownloader, service::network::NetworkServiceProvider};
 	use sc_block_builder::BlockBuilderBuilder;
 	use sp_blockchain::{BlockStatus, Error as BlockchainError, HeaderBackend, Info};
 	use sp_consensus_grandpa::{AuthorityList, SetId};
@@ -716,12 +819,16 @@ mod test {
 		let client = mock_client_with_state();
 		let provider = MockWarpSyncProvider::<Block>::new();
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+		let mut warp_sync =
+			WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
+
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
 
 		// Warp sync instantly finishes
-		let actions = warp_sync.actions().collect::<Vec<_>>();
+		let actions = warp_sync.actions(&network_handle).collect::<Vec<_>>();
 		assert_eq!(actions.len(), 1);
-		assert!(matches!(actions[0], WarpSyncAction::Finished));
+		assert!(matches!(actions[0], SyncingAction::Finished));
 
 		// ... with no result.
 		assert!(warp_sync.take_result().is_none());
@@ -737,12 +844,16 @@ mod test {
 			Default::default(),
 			Default::default(),
 		));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+		let mut warp_sync =
+			WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
+
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
 
 		// Warp sync instantly finishes
-		let actions = warp_sync.actions().collect::<Vec<_>>();
+		let actions = warp_sync.actions(&network_handle).collect::<Vec<_>>();
 		assert_eq!(actions.len(), 1);
-		assert!(matches!(actions[0], WarpSyncAction::Finished));
+		assert!(matches!(actions[0], SyncingAction::Finished));
 
 		// ... with no result.
 		assert!(warp_sync.take_result().is_none());
@@ -753,10 +864,14 @@ mod test {
 		let client = mock_client_without_state();
 		let provider = MockWarpSyncProvider::<Block>::new();
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+		let mut warp_sync =
+			WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
+
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
 
 		// No actions are emitted.
-		assert_eq!(warp_sync.actions().count(), 0)
+		assert_eq!(warp_sync.actions(&network_handle).count(), 0)
 	}
 
 	#[test]
@@ -769,10 +884,14 @@ mod test {
 			Default::default(),
 			Default::default(),
 		));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+		let mut warp_sync =
+			WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
+
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
 
 		// No actions are emitted.
-		assert_eq!(warp_sync.actions().count(), 0)
+		assert_eq!(warp_sync.actions(&network_handle).count(), 0)
 	}
 
 	#[test]
@@ -784,7 +903,8 @@ mod test {
 			.once()
 			.return_const(AuthorityList::default());
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+		let mut warp_sync =
+			WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Warp sync is not started when there is not enough peers.
 		for _ in 0..(MIN_PEERS_TO_START_WARP_SYNC - 1) {
@@ -802,7 +922,8 @@ mod test {
 		let client = mock_client_without_state();
 		let provider = MockWarpSyncProvider::<Block>::new();
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+		let mut warp_sync =
+			WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
 
 		assert!(warp_sync.schedule_next_peer(PeerState::DownloadingProofs, None).is_none());
 	}
@@ -826,7 +947,8 @@ mod test {
 				.once()
 				.return_const(AuthorityList::default());
 			let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-			let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+			let mut warp_sync =
+				WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
 
 			for best_number in 1..11 {
 				warp_sync.add_peer(PeerId::random(), Hash::random(), best_number);
@@ -847,7 +969,8 @@ mod test {
 				.once()
 				.return_const(AuthorityList::default());
 			let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-			let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+			let mut warp_sync =
+				WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
 
 			for best_number in 1..11 {
 				warp_sync.add_peer(PeerId::random(), Hash::random(), best_number);
@@ -867,7 +990,8 @@ mod test {
 			.once()
 			.return_const(AuthorityList::default());
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+		let mut warp_sync =
+			WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
 
 		for best_number in 1..11 {
 			warp_sync.add_peer(PeerId::random(), Hash::random(), best_number);
@@ -911,7 +1035,12 @@ mod test {
 			.once()
 			.return_const(AuthorityList::default());
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static("")));
+		let mut warp_sync = WarpSync::new(
+			Arc::new(client),
+			config,
+			Some(ProtocolName::Static("")),
+			Arc::new(MockBlockDownloader::new()),
+		);
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -940,7 +1069,12 @@ mod test {
 			.once()
 			.return_const(AuthorityList::default());
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static("")));
+		let mut warp_sync = WarpSync::new(
+			Arc::new(client),
+			config,
+			Some(ProtocolName::Static("")),
+			Arc::new(MockBlockDownloader::new()),
+		);
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -971,7 +1105,12 @@ mod test {
 			.once()
 			.return_const(AuthorityList::default());
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static("")));
+		let mut warp_sync = WarpSync::new(
+			Arc::new(client),
+			config,
+			Some(ProtocolName::Static("")),
+			Arc::new(MockBlockDownloader::new()),
+		);
 
 		// Make sure we have enough peers to make requests.
 		for best_number in 1..11 {
@@ -998,7 +1137,12 @@ mod test {
 			Err(Box::new(std::io::Error::new(ErrorKind::Other, "test-verification-failure")))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static("")));
+		let mut warp_sync = WarpSync::new(
+			Arc::new(client),
+			config,
+			Some(ProtocolName::Static("")),
+			Arc::new(MockBlockDownloader::new()),
+		);
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1006,11 +1150,13 @@ mod test {
 		}
 		assert!(matches!(warp_sync.phase, Phase::WarpProof { .. }));
 
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
+
 		// Consume `SendWarpProofRequest` action.
-		let actions = warp_sync.actions().collect::<Vec<_>>();
+		let actions = warp_sync.actions(&network_handle).collect::<Vec<_>>();
 		assert_eq!(actions.len(), 1);
-		let WarpSyncAction::SendWarpProofRequest { peer_id: request_peer_id, .. } = actions[0]
-		else {
+		let SyncingAction::StartRequest { peer_id: request_peer_id, .. } = actions[0] else {
 			panic!("Invalid action");
 		};
 
@@ -1021,7 +1167,7 @@ mod test {
 		assert_eq!(actions.len(), 1);
 		assert!(matches!(
 			actions[0],
-			WarpSyncAction::DropPeer(BadPeer(peer_id, _rep)) if peer_id == request_peer_id
+			SyncingAction::DropPeer(BadPeer(peer_id, _rep)) if peer_id == request_peer_id
 		));
 		assert!(matches!(warp_sync.phase, Phase::WarpProof { .. }));
 	}
@@ -1039,7 +1185,12 @@ mod test {
 			Ok(VerificationResult::Partial(set_id, authorities, Hash::random()))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, Some(ProtocolName::Static("")));
+		let mut warp_sync = WarpSync::new(
+			Arc::new(client),
+			config,
+			Some(ProtocolName::Static("")),
+			Arc::new(MockBlockDownloader::new()),
+		);
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1047,11 +1198,13 @@ mod test {
 		}
 		assert!(matches!(warp_sync.phase, Phase::WarpProof { .. }));
 
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
+
 		// Consume `SendWarpProofRequest` action.
-		let actions = warp_sync.actions().collect::<Vec<_>>();
+		let actions = warp_sync.actions(&network_handle).collect::<Vec<_>>();
 		assert_eq!(actions.len(), 1);
-		let WarpSyncAction::SendWarpProofRequest { peer_id: request_peer_id, .. } = actions[0]
-		else {
+		let SyncingAction::StartRequest { peer_id: request_peer_id, .. } = actions[0] else {
 			panic!("Invalid action");
 		};
 
@@ -1083,7 +1236,12 @@ mod test {
 			Ok(VerificationResult::Complete(set_id, authorities, target_header))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(client, config, Some(ProtocolName::Static("")));
+		let mut warp_sync = WarpSync::new(
+			client,
+			config,
+			Some(ProtocolName::Static("")),
+			Arc::new(MockBlockDownloader::new()),
+		);
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1091,11 +1249,13 @@ mod test {
 		}
 		assert!(matches!(warp_sync.phase, Phase::WarpProof { .. }));
 
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
+
 		// Consume `SendWarpProofRequest` action.
-		let actions = warp_sync.actions().collect::<Vec<_>>();
+		let actions = warp_sync.actions(&network_handle).collect::<Vec<_>>();
 		assert_eq!(actions.len(), 1);
-		let WarpSyncAction::SendWarpProofRequest { peer_id: request_peer_id, .. } = actions[0]
-		else {
+		let SyncingAction::StartRequest { peer_id: request_peer_id, .. } = actions[0] else {
 			panic!("Invalid action.");
 		};
 
@@ -1116,7 +1276,8 @@ mod test {
 			.once()
 			.return_const(AuthorityList::default());
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(Arc::new(client), config, None);
+		let mut warp_sync =
+			WarpSync::new(Arc::new(client), config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1151,7 +1312,8 @@ mod test {
 			Ok(VerificationResult::Complete(set_id, authorities, target_header))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(client, config, None);
+		let mut warp_sync =
+			WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1183,7 +1345,8 @@ mod test {
 			.block;
 		let target_header = target_block.header().clone();
 		let config = WarpSyncConfig::WithTarget(target_header);
-		let mut warp_sync = WarpSync::new(client, config, None);
+		let mut warp_sync =
+			WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1223,7 +1386,8 @@ mod test {
 			Ok(VerificationResult::Complete(set_id, authorities, target_header))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(client, config, None);
+		let mut warp_sync =
+			WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1261,7 +1425,8 @@ mod test {
 			Ok(VerificationResult::Complete(set_id, authorities, target_header))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(client, config, None);
+		let mut warp_sync =
+			WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1315,7 +1480,8 @@ mod test {
 			Ok(VerificationResult::Complete(set_id, authorities, target_header))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(client, config, None);
+		let mut warp_sync =
+			WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1392,7 +1558,8 @@ mod test {
 			Ok(VerificationResult::Complete(set_id, authorities, target_header))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(client, config, None);
+		let mut warp_sync =
+			WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1445,7 +1612,8 @@ mod test {
 			Ok(VerificationResult::Complete(set_id, authorities, target_header))
 		});
 		let config = WarpSyncConfig::WithProvider(Arc::new(provider));
-		let mut warp_sync = WarpSync::new(client, config, None);
+		let mut warp_sync =
+			WarpSync::new(client, config, None, Arc::new(MockBlockDownloader::new()));
 
 		// Make sure we have enough peers to make a request.
 		for best_number in 1..11 {
@@ -1473,10 +1641,13 @@ mod test {
 
 		assert!(warp_sync.on_block_response_inner(peer_id, request, response).is_ok());
 
+		let network_provider = NetworkServiceProvider::new();
+		let network_handle = network_provider.handle();
+
 		// Strategy finishes.
-		let actions = warp_sync.actions().collect::<Vec<_>>();
+		let actions = warp_sync.actions(&network_handle).collect::<Vec<_>>();
 		assert_eq!(actions.len(), 1);
-		assert!(matches!(actions[0], WarpSyncAction::Finished));
+		assert!(matches!(actions[0], SyncingAction::Finished));
 
 		// With correct result.
 		let result = warp_sync.take_result().unwrap();
diff --git a/substrate/client/network/sync/src/types.rs b/substrate/client/network/sync/src/types.rs
index c3403fe1e5f7561c41fa7e8460c302712282df77..5745a34378df68f65446953b55c8a0ce39f3c3d0 100644
--- a/substrate/client/network/sync/src/types.rs
+++ b/substrate/client/network/sync/src/types.rs
@@ -23,11 +23,10 @@ use sc_network_common::{role::Roles, types::ReputationChange};
 
 use crate::strategy::{state_sync::StateSyncProgress, warp::WarpSyncProgress};
 
-use sc_network_common::sync::message::BlockRequest;
 use sc_network_types::PeerId;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 
-use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc};
+use std::{fmt, pin::Pin, sync::Arc};
 
 /// The sync status of a peer we are trying to sync with
 #[derive(Debug)]
@@ -107,52 +106,6 @@ impl fmt::Display for BadPeer {
 
 impl std::error::Error for BadPeer {}
 
-#[derive(Debug)]
-pub enum PeerRequest<B: BlockT> {
-	Block(BlockRequest<B>),
-	State,
-	WarpProof,
-}
-
-#[derive(Debug)]
-pub enum PeerRequestType {
-	Block,
-	State,
-	WarpProof,
-}
-
-impl<B: BlockT> PeerRequest<B> {
-	pub fn get_type(&self) -> PeerRequestType {
-		match self {
-			PeerRequest::Block(_) => PeerRequestType::Block,
-			PeerRequest::State => PeerRequestType::State,
-			PeerRequest::WarpProof => PeerRequestType::WarpProof,
-		}
-	}
-}
-
-/// Wrapper for implementation-specific state request.
-///
-/// NOTE: Implementation must be able to encode and decode it for network purposes.
-pub struct OpaqueStateRequest(pub Box<dyn Any + Send>);
-
-impl fmt::Debug for OpaqueStateRequest {
-	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-		f.debug_struct("OpaqueStateRequest").finish()
-	}
-}
-
-/// Wrapper for implementation-specific state response.
-///
-/// NOTE: Implementation must be able to encode and decode it for network purposes.
-pub struct OpaqueStateResponse(pub Box<dyn Any + Send>);
-
-impl fmt::Debug for OpaqueStateResponse {
-	fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-		f.debug_struct("OpaqueStateResponse").finish()
-	}
-}
-
 /// Provides high-level status of syncing.
 #[async_trait::async_trait]
 pub trait SyncStatusProvider<Block: BlockT>: Send + Sync {
diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs
index 06e243342fb2dd4a9f8fac8680545ef481c66b66..825481314c672ae26c2309b43880eb11e68f94c3 100644
--- a/substrate/client/network/test/src/lib.rs
+++ b/substrate/client/network/test/src/lib.rs
@@ -67,11 +67,11 @@ use sc_network_sync::{
 	service::{network::NetworkServiceProvider, syncing_service::SyncingService},
 	state_request_handler::StateRequestHandler,
 	strategy::{
+		polkadot::{PolkadotSyncingStrategy, PolkadotSyncingStrategyConfig},
 		warp::{
 			AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncConfig,
 			WarpSyncProvider,
 		},
-		PolkadotSyncingStrategy, SyncingConfig,
 	},
 	warp_request_handler,
 };
@@ -833,8 +833,8 @@ pub trait TestNetFactory: Default + Sized + Send {
 
 		let fork_id = Some(String::from("test-fork-id"));
 
-		let (chain_sync_network_provider, chain_sync_network_handle) =
-			NetworkServiceProvider::new();
+		let chain_sync_network_provider = NetworkServiceProvider::new();
+		let chain_sync_network_handle = chain_sync_network_provider.handle();
 		let mut block_relay_params = BlockRequestHandler::new::<NetworkWorker<_, _>>(
 			chain_sync_network_handle.clone(),
 			&protocol_id,
@@ -908,12 +908,13 @@ pub trait TestNetFactory: Default + Sized + Send {
 			<Block as BlockT>::Hash,
 		>>::register_notification_metrics(None);
 
-		let syncing_config = SyncingConfig {
+		let syncing_config = PolkadotSyncingStrategyConfig {
 			mode: network_config.sync_mode,
 			max_parallel_downloads: network_config.max_parallel_downloads,
 			max_blocks_per_request: network_config.max_blocks_per_request,
 			metrics_registry: None,
 			state_request_protocol_name: state_request_protocol_config.name.clone(),
+			block_downloader: block_relay_params.downloader,
 		};
 		// Initialize syncing strategy.
 		let syncing_strategy = Box::new(
@@ -934,16 +935,14 @@ pub trait TestNetFactory: Default + Sized + Send {
 				metrics,
 				&full_net_config,
 				protocol_id.clone(),
-				&fork_id,
+				fork_id.as_deref(),
 				block_announce_validator,
 				syncing_strategy,
 				chain_sync_network_handle,
 				import_queue.service(),
-				block_relay_params.downloader,
 				peer_store_handle.clone(),
 			)
 			.unwrap();
-		let sync_service_import_queue = Box::new(sync_service.clone());
 		let sync_service = Arc::new(sync_service.clone());
 
 		for config in config.request_response_protocols {
@@ -987,8 +986,12 @@ pub trait TestNetFactory: Default + Sized + Send {
 			chain_sync_network_provider.run(service).await;
 		});
 
-		tokio::spawn(async move {
-			import_queue.run(sync_service_import_queue).await;
+		tokio::spawn({
+			let sync_service = sync_service.clone();
+
+			async move {
+				import_queue.run(sync_service.as_ref()).await;
+			}
 		});
 
 		tokio::spawn(async move {
diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs
index ad2d1d9ec24de96b00846127b6bb0e71638429b4..688b569c32228448a4e16f7b5a60b175c8411027 100644
--- a/substrate/client/network/test/src/service.rs
+++ b/substrate/client/network/test/src/service.rs
@@ -32,9 +32,9 @@ use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
 use sc_network_sync::{
 	block_request_handler::BlockRequestHandler,
 	engine::SyncingEngine,
-	service::network::{NetworkServiceHandle, NetworkServiceProvider},
+	service::network::NetworkServiceProvider,
 	state_request_handler::StateRequestHandler,
-	strategy::{PolkadotSyncingStrategy, SyncingConfig},
+	strategy::polkadot::{PolkadotSyncingStrategy, PolkadotSyncingStrategyConfig},
 };
 use sp_blockchain::HeaderBackend;
 use sp_runtime::traits::{Block as BlockT, Zero};
@@ -78,7 +78,7 @@ struct TestNetworkBuilder {
 	client: Option<Arc<substrate_test_runtime_client::TestClient>>,
 	listen_addresses: Vec<Multiaddr>,
 	set_config: Option<config::SetConfig>,
-	chain_sync_network: Option<(NetworkServiceProvider, NetworkServiceHandle)>,
+	chain_sync_network: Option<NetworkServiceProvider>,
 	notification_protocols: Vec<config::NonDefaultSetConfig>,
 	config: Option<config::NetworkConfiguration>,
 }
@@ -157,8 +157,9 @@ impl TestNetworkBuilder {
 		let fork_id = Some(String::from("test-fork-id"));
 		let mut full_net_config = FullNetworkConfiguration::new(&network_config, None);
 
-		let (chain_sync_network_provider, chain_sync_network_handle) =
+		let chain_sync_network_provider =
 			self.chain_sync_network.unwrap_or(NetworkServiceProvider::new());
+		let chain_sync_network_handle = chain_sync_network_provider.handle();
 		let mut block_relay_params =
 			BlockRequestHandler::new::<
 				NetworkWorker<
@@ -203,12 +204,13 @@ impl TestNetworkBuilder {
 		let peer_store_handle: Arc<dyn PeerStoreProvider> = Arc::new(peer_store.handle());
 		tokio::spawn(peer_store.run().boxed());
 
-		let syncing_config = SyncingConfig {
+		let syncing_config = PolkadotSyncingStrategyConfig {
 			mode: network_config.sync_mode,
 			max_parallel_downloads: network_config.max_parallel_downloads,
 			max_blocks_per_request: network_config.max_blocks_per_request,
 			metrics_registry: None,
 			state_request_protocol_name: state_request_protocol_config.name.clone(),
+			block_downloader: block_relay_params.downloader,
 		};
 		// Initialize syncing strategy.
 		let syncing_strategy = Box::new(
@@ -222,12 +224,11 @@ impl TestNetworkBuilder {
 			NotificationMetrics::new(None),
 			&full_net_config,
 			protocol_id.clone(),
-			&None,
+			None,
 			Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator),
 			syncing_strategy,
 			chain_sync_network_handle,
 			import_queue.service(),
-			block_relay_params.downloader,
 			Arc::clone(&peer_store_handle),
 		)
 		.unwrap();
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index f27b7ec6fbad87725a560192bd063737ce2157d6..ce4ce7c082483d41e57fa239d16223050f83bd31 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -35,7 +35,7 @@ use sc_client_api::{
 	BlockBackend, BlockchainEvents, ExecutorProvider, ForkBlocks, StorageProvider, UsageProvider,
 };
 use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, PruningMode};
-use sc_consensus::import_queue::ImportQueue;
+use sc_consensus::import_queue::{ImportQueue, ImportQueueService};
 use sc_executor::{
 	sp_wasm_interface::HostFunctions, HeapAllocStrategy, NativeExecutionDispatch, RuntimeVersionOf,
 	WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY,
@@ -50,15 +50,18 @@ use sc_network::{
 	},
 	NetworkBackend, NetworkStateInfo,
 };
-use sc_network_common::role::Roles;
+use sc_network_common::role::{Role, Roles};
 use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
 use sc_network_sync::{
-	block_relay_protocol::BlockRelayParams,
+	block_relay_protocol::{BlockDownloader, BlockRelayParams},
 	block_request_handler::BlockRequestHandler,
 	engine::SyncingEngine,
-	service::network::NetworkServiceProvider,
+	service::network::{NetworkServiceHandle, NetworkServiceProvider},
 	state_request_handler::StateRequestHandler,
-	strategy::{PolkadotSyncingStrategy, SyncingConfig, SyncingStrategy},
+	strategy::{
+		polkadot::{PolkadotSyncingStrategy, PolkadotSyncingStrategyConfig},
+		SyncingStrategy,
+	},
 	warp_request_handler::RequestHandler as WarpSyncRequestHandler,
 	SyncingService, WarpSyncConfig,
 };
@@ -780,7 +783,7 @@ where
 	Ok(rpc_api)
 }
 
-/// Parameters to pass into `build_network`.
+/// Parameters to pass into [`build_network`].
 pub struct BuildNetworkParams<'a, Block, Net, TxPool, IQ, Client>
 where
 	Block: BlockT,
@@ -802,8 +805,8 @@ where
 	pub block_announce_validator_builder: Option<
 		Box<dyn FnOnce(Arc<Client>) -> Box<dyn BlockAnnounceValidator<Block> + Send> + Send>,
 	>,
-	/// Syncing strategy to use in syncing engine.
-	pub syncing_strategy: Box<dyn SyncingStrategy<Block>>,
+	/// Optional warp sync config.
+	pub warp_sync_config: Option<WarpSyncConfig<Block>>,
 	/// User specified block relay params. If not specified, the default
 	/// block request handler will be used.
 	pub block_relay: Option<BlockRelayParams<Block, Net>>,
@@ -847,100 +850,217 @@ where
 		spawn_handle,
 		import_queue,
 		block_announce_validator_builder,
-		syncing_strategy,
+		warp_sync_config,
 		block_relay,
 		metrics,
 	} = params;
 
-	let protocol_id = config.protocol_id();
-	let genesis_hash = client.info().genesis_hash;
-
 	let block_announce_validator = if let Some(f) = block_announce_validator_builder {
 		f(client.clone())
 	} else {
 		Box::new(DefaultBlockAnnounceValidator)
 	};
 
-	let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new();
-	let (mut block_server, block_downloader, block_request_protocol_config) = match block_relay {
-		Some(params) => (params.server, params.downloader, params.request_response_config),
-		None => {
-			// Custom protocol was not specified, use the default block handler.
-			// Allow both outgoing and incoming requests.
-			let params = BlockRequestHandler::new::<Net>(
-				chain_sync_network_handle.clone(),
-				&protocol_id,
-				config.chain_spec.fork_id(),
-				client.clone(),
-				config.network.default_peers_set.in_peers as usize +
-					config.network.default_peers_set.out_peers as usize,
-			);
-			(params.server, params.downloader, params.request_response_config)
+	let network_service_provider = NetworkServiceProvider::new();
+	let protocol_id = config.protocol_id();
+	let fork_id = config.chain_spec.fork_id();
+	let metrics_registry = config.prometheus_config.as_ref().map(|config| &config.registry);
+
+	let block_downloader = match block_relay {
+		Some(params) => {
+			let BlockRelayParams { mut server, downloader, request_response_config } = params;
+
+			net_config.add_request_response_protocol(request_response_config);
+
+			spawn_handle.spawn("block-request-handler", Some("networking"), async move {
+				server.run().await;
+			});
+
+			downloader
 		},
+		None => build_default_block_downloader(
+			&protocol_id,
+			fork_id,
+			&mut net_config,
+			network_service_provider.handle(),
+			Arc::clone(&client),
+			config.network.default_peers_set.in_peers as usize +
+				config.network.default_peers_set.out_peers as usize,
+			&spawn_handle,
+		),
 	};
-	spawn_handle.spawn("block-request-handler", Some("networking"), async move {
-		block_server.run().await;
-	});
+
+	let syncing_strategy = build_polkadot_syncing_strategy(
+		protocol_id.clone(),
+		fork_id,
+		&mut net_config,
+		warp_sync_config,
+		block_downloader,
+		client.clone(),
+		&spawn_handle,
+		metrics_registry,
+	)?;
+
+	let (syncing_engine, sync_service, block_announce_config) = SyncingEngine::new(
+		Roles::from(&config.role),
+		Arc::clone(&client),
+		metrics_registry,
+		metrics.clone(),
+		&net_config,
+		protocol_id.clone(),
+		fork_id,
+		block_announce_validator,
+		syncing_strategy,
+		network_service_provider.handle(),
+		import_queue.service(),
+		net_config.peer_store_handle(),
+	)?;
+
+	spawn_handle.spawn_blocking("syncing", None, syncing_engine.run());
+
+	build_network_advanced(BuildNetworkAdvancedParams {
+		role: config.role,
+		protocol_id,
+		fork_id,
+		ipfs_server: config.network.ipfs_server,
+		announce_block: config.announce_block,
+		net_config,
+		client,
+		transaction_pool,
+		spawn_handle,
+		import_queue,
+		sync_service,
+		block_announce_config,
+		network_service_provider,
+		metrics_registry,
+		metrics,
+	})
+}
+
+/// Parameters to pass into [`build_network_advanced`].
+pub struct BuildNetworkAdvancedParams<'a, Block, Net, TxPool, IQ, Client>
+where
+	Block: BlockT,
+	Net: NetworkBackend<Block, <Block as BlockT>::Hash>,
+{
+	/// Role of the local node.
+	pub role: Role,
+	/// Protocol name prefix.
+	pub protocol_id: ProtocolId,
+	/// Fork ID.
+	pub fork_id: Option<&'a str>,
+	/// Enable serving block data over IPFS bitswap.
+	pub ipfs_server: bool,
+	/// Announce block automatically after they have been imported.
+	pub announce_block: bool,
+	/// Full network configuration.
+	pub net_config: FullNetworkConfiguration<Block, <Block as BlockT>::Hash, Net>,
+	/// A shared client returned by `new_full_parts`.
+	pub client: Arc<Client>,
+	/// A shared transaction pool.
+	pub transaction_pool: Arc<TxPool>,
+	/// A handle for spawning tasks.
+	pub spawn_handle: SpawnTaskHandle,
+	/// An import queue.
+	pub import_queue: IQ,
+	/// Syncing service to communicate with syncing engine.
+	pub sync_service: SyncingService<Block>,
+	/// Block announce config.
+	pub block_announce_config: Net::NotificationProtocolConfig,
+	/// Network service provider to drive with network internally.
+	pub network_service_provider: NetworkServiceProvider,
+	/// Prometheus metrics registry.
+	pub metrics_registry: Option<&'a Registry>,
+	/// Metrics.
+	pub metrics: NotificationMetrics,
+}
+
+/// Build the network service, the network status sinks and an RPC sender, this is a lower-level
+/// version of [`build_network`] for those needing more control.
+pub fn build_network_advanced<Block, Net, TxPool, IQ, Client>(
+	params: BuildNetworkAdvancedParams<Block, Net, TxPool, IQ, Client>,
+) -> Result<
+	(
+		Arc<dyn sc_network::service::traits::NetworkService>,
+		TracingUnboundedSender<sc_rpc::system::Request<Block>>,
+		sc_network_transactions::TransactionsHandlerController<<Block as BlockT>::Hash>,
+		NetworkStarter,
+		Arc<SyncingService<Block>>,
+	),
+	Error,
+>
+where
+	Block: BlockT,
+	Client: ProvideRuntimeApi<Block>
+		+ HeaderMetadata<Block, Error = sp_blockchain::Error>
+		+ Chain<Block>
+		+ BlockBackend<Block>
+		+ BlockIdTo<Block, Error = sp_blockchain::Error>
+		+ ProofProvider<Block>
+		+ HeaderBackend<Block>
+		+ BlockchainEvents<Block>
+		+ 'static,
+	TxPool: TransactionPool<Block = Block, Hash = <Block as BlockT>::Hash> + 'static,
+	IQ: ImportQueue<Block> + 'static,
+	Net: NetworkBackend<Block, <Block as BlockT>::Hash>,
+{
+	let BuildNetworkAdvancedParams {
+		role,
+		protocol_id,
+		fork_id,
+		ipfs_server,
+		announce_block,
+		mut net_config,
+		client,
+		transaction_pool,
+		spawn_handle,
+		import_queue,
+		sync_service,
+		block_announce_config,
+		network_service_provider,
+		metrics_registry,
+		metrics,
+	} = params;
+
+	let genesis_hash = client.info().genesis_hash;
 
 	let light_client_request_protocol_config = {
 		// Allow both outgoing and incoming requests.
-		let (handler, protocol_config) = LightClientRequestHandler::new::<Net>(
-			&protocol_id,
-			config.chain_spec.fork_id(),
-			client.clone(),
-		);
+		let (handler, protocol_config) =
+			LightClientRequestHandler::new::<Net>(&protocol_id, fork_id, client.clone());
 		spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run());
 		protocol_config
 	};
 
 	// install request handlers to `FullNetworkConfiguration`
-	net_config.add_request_response_protocol(block_request_protocol_config);
 	net_config.add_request_response_protocol(light_client_request_protocol_config);
 
-	let bitswap_config = config.network.ipfs_server.then(|| {
+	let bitswap_config = ipfs_server.then(|| {
 		let (handler, config) = Net::bitswap_server(client.clone());
 		spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler);
 
 		config
 	});
 
-	// create transactions protocol and add it to the list of supported protocols of
-	let peer_store_handle = net_config.peer_store_handle();
+	// Create transactions protocol and add it to the list of supported protocols of
 	let (transactions_handler_proto, transactions_config) =
 		sc_network_transactions::TransactionsHandlerPrototype::new::<_, Block, Net>(
 			protocol_id.clone(),
 			genesis_hash,
-			config.chain_spec.fork_id(),
+			fork_id,
 			metrics.clone(),
-			Arc::clone(&peer_store_handle),
+			net_config.peer_store_handle(),
 		);
 	net_config.add_notification_protocol(transactions_config);
 
 	// Start task for `PeerStore`
 	let peer_store = net_config.take_peer_store();
-	let peer_store_handle = peer_store.handle();
 	spawn_handle.spawn("peer-store", Some("networking"), peer_store.run());
 
-	let (engine, sync_service, block_announce_config) = SyncingEngine::new(
-		Roles::from(&config.role),
-		client.clone(),
-		config.prometheus_config.as_ref().map(|config| config.registry.clone()).as_ref(),
-		metrics.clone(),
-		&net_config,
-		protocol_id.clone(),
-		&config.chain_spec.fork_id().map(ToOwned::to_owned),
-		block_announce_validator,
-		syncing_strategy,
-		chain_sync_network_handle,
-		import_queue.service(),
-		block_downloader,
-		Arc::clone(&peer_store_handle),
-	)?;
-	let sync_service_import_queue = sync_service.clone();
 	let sync_service = Arc::new(sync_service);
 
 	let network_params = sc_network::config::Params::<Block, <Block as BlockT>::Hash, Net> {
-		role: config.role,
+		role,
 		executor: {
 			let spawn_handle = Clone::clone(&spawn_handle);
 			Box::new(move |fut| {
@@ -950,8 +1070,8 @@ where
 		network_config: net_config,
 		genesis_hash,
 		protocol_id,
-		fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned),
-		metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()),
+		fork_id: fork_id.map(ToOwned::to_owned),
+		metrics_registry: metrics_registry.cloned(),
 		block_announce_config,
 		bitswap_config,
 		notification_metrics: metrics,
@@ -965,7 +1085,7 @@ where
 		network.clone(),
 		sync_service.clone(),
 		Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }),
-		config.prometheus_config.as_ref().map(|config| &config.registry),
+		metrics_registry,
 	)?;
 	spawn_handle.spawn_blocking(
 		"network-transactions-handler",
@@ -976,17 +1096,20 @@ where
 	spawn_handle.spawn_blocking(
 		"chain-sync-network-service-provider",
 		Some("networking"),
-		chain_sync_network_provider.run(Arc::new(network.clone())),
+		network_service_provider.run(Arc::new(network.clone())),
 	);
-	spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(sync_service_import_queue)));
-	spawn_handle.spawn_blocking("syncing", None, engine.run());
+	spawn_handle.spawn("import-queue", None, {
+		let sync_service = sync_service.clone();
+
+		async move { import_queue.run(sync_service.as_ref()).await }
+	});
 
 	let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc", 10_000);
 	spawn_handle.spawn(
 		"system-rpc-handler",
 		Some("networking"),
 		build_system_rpc_future::<_, _, <Block as BlockT>::Hash>(
-			config.role,
+			role,
 			network_mut.network_service(),
 			sync_service.clone(),
 			client.clone(),
@@ -999,7 +1122,7 @@ where
 		network_mut,
 		client,
 		sync_service.clone(),
-		config.announce_block,
+		announce_block,
 	);
 
 	// TODO: Normally, one is supposed to pass a list of notifications protocols supported by the
@@ -1047,12 +1170,154 @@ where
 	))
 }
 
+/// Configuration for [`build_default_syncing_engine`].
+pub struct DefaultSyncingEngineConfig<'a, Block, Client, Net>
+where
+	Block: BlockT,
+	Net: NetworkBackend<Block, <Block as BlockT>::Hash>,
+{
+	/// Role of the local node.
+	pub role: Role,
+	/// Protocol name prefix.
+	pub protocol_id: ProtocolId,
+	/// Fork ID.
+	pub fork_id: Option<&'a str>,
+	/// Full network configuration.
+	pub net_config: &'a mut FullNetworkConfiguration<Block, <Block as BlockT>::Hash, Net>,
+	/// Validator for incoming block announcements.
+	pub block_announce_validator: Box<dyn BlockAnnounceValidator<Block> + Send>,
+	/// Handle to communicate with `NetworkService`.
+	pub network_service_handle: NetworkServiceHandle,
+	/// Warp sync configuration (when used).
+	pub warp_sync_config: Option<WarpSyncConfig<Block>>,
+	/// A shared client returned by `new_full_parts`.
+	pub client: Arc<Client>,
+	/// Blocks import queue API.
+	pub import_queue_service: Box<dyn ImportQueueService<Block>>,
+	/// Expected max total number of peer connections (in + out).
+	pub num_peers_hint: usize,
+	/// A handle for spawning tasks.
+	pub spawn_handle: &'a SpawnTaskHandle,
+	/// Prometheus metrics registry.
+	pub metrics_registry: Option<&'a Registry>,
+	/// Notification protocol metrics.
+	pub metrics: NotificationMetrics,
+}
+
+/// Build default syncing engine using [`build_default_block_downloader`] and
+/// [`build_polkadot_syncing_strategy`] internally.
+pub fn build_default_syncing_engine<Block, Client, Net>(
+	config: DefaultSyncingEngineConfig<Block, Client, Net>,
+) -> Result<(SyncingService<Block>, Net::NotificationProtocolConfig), Error>
+where
+	Block: BlockT,
+	Client: HeaderBackend<Block>
+		+ BlockBackend<Block>
+		+ HeaderMetadata<Block, Error = sp_blockchain::Error>
+		+ ProofProvider<Block>
+		+ Send
+		+ Sync
+		+ 'static,
+	Net: NetworkBackend<Block, <Block as BlockT>::Hash>,
+{
+	let DefaultSyncingEngineConfig {
+		role,
+		protocol_id,
+		fork_id,
+		net_config,
+		block_announce_validator,
+		network_service_handle,
+		warp_sync_config,
+		client,
+		import_queue_service,
+		num_peers_hint,
+		spawn_handle,
+		metrics_registry,
+		metrics,
+	} = config;
+
+	let block_downloader = build_default_block_downloader(
+		&protocol_id,
+		fork_id,
+		net_config,
+		network_service_handle.clone(),
+		client.clone(),
+		num_peers_hint,
+		spawn_handle,
+	);
+	let syncing_strategy = build_polkadot_syncing_strategy(
+		protocol_id.clone(),
+		fork_id,
+		net_config,
+		warp_sync_config,
+		block_downloader,
+		client.clone(),
+		spawn_handle,
+		metrics_registry,
+	)?;
+
+	let (syncing_engine, sync_service, block_announce_config) = SyncingEngine::new(
+		Roles::from(&role),
+		client,
+		metrics_registry,
+		metrics,
+		&net_config,
+		protocol_id,
+		fork_id,
+		block_announce_validator,
+		syncing_strategy,
+		network_service_handle,
+		import_queue_service,
+		net_config.peer_store_handle(),
+	)?;
+
+	spawn_handle.spawn_blocking("syncing", None, syncing_engine.run());
+
+	Ok((sync_service, block_announce_config))
+}
+
+/// Build the default block downloader and register its request-response protocol.
+pub fn build_default_block_downloader<Block, Client, Net>(
+	protocol_id: &ProtocolId,
+	fork_id: Option<&str>,
+	net_config: &mut FullNetworkConfiguration<Block, <Block as BlockT>::Hash, Net>,
+	network_service_handle: NetworkServiceHandle,
+	client: Arc<Client>,
+	num_peers_hint: usize,
+	spawn_handle: &SpawnTaskHandle,
+) -> Arc<dyn BlockDownloader<Block>>
+where
+	Block: BlockT,
+	Client: HeaderBackend<Block> + BlockBackend<Block> + Send + Sync + 'static,
+	Net: NetworkBackend<Block, <Block as BlockT>::Hash>,
+{
+	// Instantiate the default block request handler: it serves incoming block
+	// requests and provides the downloader used for outgoing requests.
+	let BlockRelayParams { mut server, downloader, request_response_config } =
+		BlockRequestHandler::new::<Net>(
+			network_service_handle,
+			&protocol_id,
+			fork_id,
+			client.clone(),
+			num_peers_hint,
+		);
+
+	spawn_handle.spawn("block-request-handler", Some("networking"), async move {
+		server.run().await;
+	});
+
+	net_config.add_request_response_protocol(request_response_config);
+
+	downloader
+}
+
 /// Build standard polkadot syncing strategy
 pub fn build_polkadot_syncing_strategy<Block, Client, Net>(
 	protocol_id: ProtocolId,
 	fork_id: Option<&str>,
 	net_config: &mut FullNetworkConfiguration<Block, <Block as BlockT>::Hash, Net>,
 	warp_sync_config: Option<WarpSyncConfig<Block>>,
+	block_downloader: Arc<dyn BlockDownloader<Block>>,
 	client: Arc<Client>,
 	spawn_handle: &SpawnTaskHandle,
 	metrics_registry: Option<&Registry>,
@@ -1066,7 +1331,6 @@ where
 		+ Send
 		+ Sync
 		+ 'static,
-
 	Net: NetworkBackend<Block, <Block as BlockT>::Hash>,
 {
 	if warp_sync_config.is_none() && net_config.network_config.sync_mode.is_warp() {
@@ -1117,12 +1381,13 @@ where
 		net_config.add_request_response_protocol(config);
 	}
 
-	let syncing_config = SyncingConfig {
+	let syncing_config = PolkadotSyncingStrategyConfig {
 		mode: net_config.network_config.sync_mode,
 		max_parallel_downloads: net_config.network_config.max_parallel_downloads,
 		max_blocks_per_request: net_config.network_config.max_blocks_per_request,
 		metrics_registry: metrics_registry.cloned(),
 		state_request_protocol_name,
+		block_downloader,
 	};
 	Ok(Box::new(PolkadotSyncingStrategy::new(
 		syncing_config,
diff --git a/substrate/client/service/src/chain_ops/import_blocks.rs b/substrate/client/service/src/chain_ops/import_blocks.rs
index 661fc09a8f19e55c38bec280b7ce399a8dbe2e29..8e759faa0775d6aa2f3adbec83273079aded4498 100644
--- a/substrate/client/service/src/chain_ops/import_blocks.rs
+++ b/substrate/client/service/src/chain_ops/import_blocks.rs
@@ -37,6 +37,10 @@ use sp_runtime::{
 use std::{
 	io::Read,
 	pin::Pin,
+	sync::{
+		atomic::{AtomicBool, AtomicU64, Ordering},
+		Arc,
+	},
 	task::Poll,
 	time::{Duration, Instant},
 };
@@ -50,8 +54,6 @@ const DELAY_TIME: u64 = 200;
 /// Number of milliseconds that must have passed between two updates.
 const TIME_BETWEEN_UPDATES: u64 = 3_000;
 
-use std::sync::Arc;
-
 /// Build a chain spec json
 pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result<String> {
 	spec.as_json(raw).map_err(Into::into)
@@ -301,29 +303,29 @@ where
 	IQ: ImportQueue<B> + 'static,
 {
 	struct WaitLink {
-		imported_blocks: u64,
-		has_error: bool,
+		imported_blocks: AtomicU64,
+		has_error: AtomicBool,
 	}
 
 	impl WaitLink {
 		fn new() -> WaitLink {
-			WaitLink { imported_blocks: 0, has_error: false }
+			WaitLink { imported_blocks: AtomicU64::new(0), has_error: AtomicBool::new(false) }
 		}
 	}
 
 	impl<B: BlockT> Link<B> for WaitLink {
 		fn blocks_processed(
-			&mut self,
+			&self,
 			imported: usize,
 			_num_expected_blocks: usize,
 			results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
 		) {
-			self.imported_blocks += imported as u64;
+			self.imported_blocks.fetch_add(imported as u64, Ordering::AcqRel);
 
 			for result in results {
 				if let (Err(err), hash) = result {
 					warn!("There was an error importing block with hash {:?}: {}", hash, err);
-					self.has_error = true;
+					self.has_error.store(true, Ordering::Release);
 					break
 				}
 			}
@@ -373,7 +375,9 @@ where
 						let read_block_count = block_iter.read_block_count();
 						match block_result {
 							Ok(block) => {
-								if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS {
+								if read_block_count - link.imported_blocks.load(Ordering::Acquire) >=
+									MAX_PENDING_BLOCKS
+								{
 									// The queue is full, so do not add this block and simply wait
 									// until the queue has made some progress.
 									let delay = Delay::new(Duration::from_millis(DELAY_TIME));
@@ -399,7 +403,9 @@ where
 			},
 			ImportState::WaitingForImportQueueToCatchUp { block_iter, mut delay, block } => {
 				let read_block_count = block_iter.read_block_count();
-				if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS {
+				if read_block_count - link.imported_blocks.load(Ordering::Acquire) >=
+					MAX_PENDING_BLOCKS
+				{
 					// Queue is still full, so wait until there is room to insert our block.
 					match Pin::new(&mut delay).poll(cx) {
 						Poll::Pending => {
@@ -433,7 +439,11 @@ where
 			} => {
 				// All the blocks have been added to the queue, which doesn't mean they
 				// have all been properly imported.
-				if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) {
+				if importing_is_done(
+					num_expected_blocks,
+					read_block_count,
+					link.imported_blocks.load(Ordering::Acquire),
+				) {
 					// Importing is done, we can log the result and return.
 					info!(
 						"🎉 Imported {} blocks. Best: #{}",
@@ -472,10 +482,10 @@ where
 		let best_number = client.info().best_number;
 		speedometer.notify_user(best_number);
 
-		if link.has_error {
+		if link.has_error.load(Ordering::Acquire) {
 			return Poll::Ready(Err(Error::Other(format!(
 				"Stopping after #{} blocks because of an error",
-				link.imported_blocks
+				link.imported_blocks.load(Ordering::Acquire)
 			))))
 		}
 
diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs
index 3df9020b04180d798d4eb370c1de049bec10bef0..ee4f4e7622e74b8f07225edd729d32b3b4e98df1 100644
--- a/substrate/client/service/src/lib.rs
+++ b/substrate/client/service/src/lib.rs
@@ -59,11 +59,13 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
 
 pub use self::{
 	builder::{
-		build_network, build_polkadot_syncing_strategy, gen_rpc_module, init_telemetry, new_client,
-		new_db_backend, new_full_client, new_full_parts, new_full_parts_record_import,
+		build_default_block_downloader, build_default_syncing_engine, build_network,
+		build_network_advanced, build_polkadot_syncing_strategy, gen_rpc_module, init_telemetry,
+		new_client, new_db_backend, new_full_client, new_full_parts, new_full_parts_record_import,
 		new_full_parts_with_genesis_builder, new_wasm_executor,
-		propagate_transaction_notifications, spawn_tasks, BuildNetworkParams, KeystoreContainer,
-		NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor, TFullClient,
+		propagate_transaction_notifications, spawn_tasks, BuildNetworkAdvancedParams,
+		BuildNetworkParams, DefaultSyncingEngineConfig, KeystoreContainer, NetworkStarter,
+		SpawnTasksParams, TFullBackend, TFullCallExecutor, TFullClient,
 	},
 	client::{ClientConfig, LocalCallExecutor},
 	error::Error,
diff --git a/templates/minimal/node/src/service.rs b/templates/minimal/node/src/service.rs
index f9a9d1e0f3cfe9aba3dc380acbd514f76b9bb4e1..b4e6fc0b728b60c8930eded042fdc6c03e3be1e8 100644
--- a/templates/minimal/node/src/service.rs
+++ b/templates/minimal/node/src/service.rs
@@ -21,9 +21,7 @@ use minimal_template_runtime::{interface::OpaqueBlock as Block, RuntimeApi};
 use polkadot_sdk::{
 	sc_client_api::backend::Backend,
 	sc_executor::WasmExecutor,
-	sc_service::{
-		build_polkadot_syncing_strategy, error::Error as ServiceError, Configuration, TaskManager,
-	},
+	sc_service::{error::Error as ServiceError, Configuration, TaskManager},
 	sc_telemetry::{Telemetry, TelemetryWorker},
 	sc_transaction_pool_api::OffchainTransactionPoolFactory,
 	sp_runtime::traits::Block as BlockT,
@@ -124,7 +122,7 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
 		other: mut telemetry,
 	} = new_partial(&config)?;
 
-	let mut net_config = sc_network::config::FullNetworkConfiguration::<
+	let net_config = sc_network::config::FullNetworkConfiguration::<
 		Block,
 		<Block as BlockT>::Hash,
 		Network,
@@ -136,26 +134,16 @@ pub fn new_full<Network: sc_network::NetworkBackend<Block, <Block as BlockT>::Ha
 		config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
 	);
 
-	let syncing_strategy = build_polkadot_syncing_strategy(
-		config.protocol_id(),
-		config.chain_spec.fork_id(),
-		&mut net_config,
-		None,
-		client.clone(),
-		&task_manager.spawn_handle(),
-		config.prometheus_config.as_ref().map(|config| &config.registry),
-	)?;
-
 	let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
 			config: &config,
+			net_config,
 			client: client.clone(),
 			transaction_pool: transaction_pool.clone(),
 			spawn_handle: task_manager.spawn_handle(),
 			import_queue,
-			net_config,
 			block_announce_validator_builder: None,
-			syncing_strategy,
+			warp_sync_config: None,
 			block_relay: None,
 			metrics,
 		})?;
diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml
index 8a3c7d0ac78002590e64d06e72460c7afa6b2270..4c0ab31df95e2b95b0fd1d9fb548c98cf6862d90 100644
--- a/templates/solochain/node/Cargo.toml
+++ b/templates/solochain/node/Cargo.toml
@@ -30,9 +30,9 @@ sc-telemetry = { workspace = true, default-features = true }
 sc-transaction-pool = { workspace = true, default-features = true }
 sc-transaction-pool-api = { workspace = true, default-features = true }
 sc-offchain = { workspace = true, default-features = true }
+sc-consensus = { workspace = true, default-features = true }
 sc-consensus-aura = { workspace = true, default-features = true }
 sp-consensus-aura = { workspace = true, default-features = true }
-sc-consensus = { workspace = true, default-features = true }
 sc-consensus-grandpa = { workspace = true, default-features = true }
 sp-consensus-grandpa = { workspace = true, default-features = true }
 sp-genesis-builder = { workspace = true, default-features = true }
diff --git a/templates/solochain/node/src/service.rs b/templates/solochain/node/src/service.rs
index 2524906fd508f54630c51f7bf9b74f6be25d47e6..d6fcebe239f7e7a95e065ab266a3c6149b0c9c5b 100644
--- a/templates/solochain/node/src/service.rs
+++ b/templates/solochain/node/src/service.rs
@@ -4,10 +4,7 @@ use futures::FutureExt;
 use sc_client_api::{Backend, BlockBackend};
 use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
 use sc_consensus_grandpa::SharedVoterState;
-use sc_service::{
-	build_polkadot_syncing_strategy, error::Error as ServiceError, Configuration, TaskManager,
-	WarpSyncConfig,
-};
+use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncConfig};
 use sc_telemetry::{Telemetry, TelemetryWorker};
 use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use solochain_template_runtime::{self, apis::RuntimeApi, opaque::Block};
@@ -172,16 +169,6 @@ pub fn new_full<
 		Vec::default(),
 	));
 
-	let syncing_strategy = build_polkadot_syncing_strategy(
-		config.protocol_id(),
-		config.chain_spec.fork_id(),
-		&mut net_config,
-		Some(WarpSyncConfig::WithProvider(warp_sync)),
-		client.clone(),
-		&task_manager.spawn_handle(),
-		config.prometheus_config.as_ref().map(|config| &config.registry),
-	)?;
-
 	let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
 			config: &config,
@@ -191,7 +178,7 @@ pub fn new_full<
 			spawn_handle: task_manager.spawn_handle(),
 			import_queue,
 			block_announce_validator_builder: None,
-			syncing_strategy,
+			warp_sync_config: Some(WarpSyncConfig::WithProvider(warp_sync)),
 			block_relay: None,
 			metrics,
 		})?;