diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock
index a1414fef8edfd407e949af27000a25ea13fe67c3..7b789052c03fa94eeef85cf93fd1b11f398af943 100644
--- a/polkadot/Cargo.lock
+++ b/polkadot/Cargo.lock
@@ -5993,6 +5993,7 @@ dependencies = [
  "polkadot-node-subsystem",
  "polkadot-node-subsystem-test-helpers",
  "polkadot-node-subsystem-util",
+ "polkadot-overseer",
  "polkadot-primitives",
  "sc-authority-discovery",
  "sc-network",
@@ -6236,7 +6237,6 @@ dependencies = [
  "futures 0.3.15",
  "futures-timer 3.0.2",
  "polkadot-node-subsystem",
- "polkadot-overseer",
  "polkadot-primitives",
  "sp-blockchain",
  "sp-inherents",
@@ -6331,6 +6331,21 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "polkadot-node-metrics"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "futures 0.3.15",
+ "futures-timer 3.0.2",
+ "metered-channel",
+ "sc-network",
+ "sp-application-crypto",
+ "sp-core",
+ "sp-keystore",
+ "substrate-prometheus-endpoint",
+]
+
 [[package]]
 name = "polkadot-node-network-protocol"
 version = "0.1.0"
@@ -6371,52 +6386,61 @@ dependencies = [
 name = "polkadot-node-subsystem"
 version = "0.1.0"
 dependencies = [
- "assert_matches",
- "async-std",
+ "polkadot-node-jaeger",
+ "polkadot-node-subsystem-types",
+ "polkadot-overseer",
+]
+
+[[package]]
+name = "polkadot-node-subsystem-test-helpers"
+version = "0.1.0"
+dependencies = [
  "async-trait",
- "derive_more",
  "futures 0.3.15",
  "futures-timer 3.0.2",
- "lazy_static",
- "log",
- "mick-jaeger",
  "parity-scale-codec",
  "parking_lot 0.11.1",
  "pin-project 1.0.7",
- "polkadot-node-jaeger",
- "polkadot-node-network-protocol",
  "polkadot-node-primitives",
- "polkadot-node-subsystem-test-helpers",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-overseer",
  "polkadot-primitives",
- "polkadot-procmacro-subsystem-dispatch-gen",
  "polkadot-statement-table",
  "sc-network",
  "smallvec 1.6.1",
  "sp-core",
- "substrate-prometheus-endpoint",
- "thiserror",
  "tracing",
 ]
 
 [[package]]
-name = "polkadot-node-subsystem-test-helpers"
+name = "polkadot-node-subsystem-types"
 version = "0.1.0"
 dependencies = [
+ "assert_matches",
+ "async-std",
  "async-trait",
+ "derive_more",
  "futures 0.3.15",
  "futures-timer 3.0.2",
+ "lazy_static",
+ "log",
+ "mick-jaeger",
  "parity-scale-codec",
  "parking_lot 0.11.1",
  "pin-project 1.0.7",
+ "polkadot-node-jaeger",
+ "polkadot-node-network-protocol",
  "polkadot-node-primitives",
- "polkadot-node-subsystem",
- "polkadot-node-subsystem-util",
- "polkadot-overseer",
+ "polkadot-node-subsystem-test-helpers",
+ "polkadot-overseer-gen",
  "polkadot-primitives",
  "polkadot-statement-table",
  "sc-network",
  "smallvec 1.6.1",
  "sp-core",
+ "substrate-prometheus-endpoint",
+ "thiserror",
  "tracing",
 ]
 
@@ -6437,10 +6461,12 @@ dependencies = [
  "parking_lot 0.11.1",
  "pin-project 1.0.7",
  "polkadot-node-jaeger",
+ "polkadot-node-metrics",
  "polkadot-node-network-protocol",
  "polkadot-node-primitives",
  "polkadot-node-subsystem",
  "polkadot-node-subsystem-test-helpers",
+ "polkadot-overseer",
  "polkadot-primitives",
  "rand 0.8.4",
  "sc-network",
@@ -6463,18 +6489,59 @@ dependencies = [
  "futures-timer 3.0.2",
  "kv-log-macro",
  "lru",
+ "metered-channel",
+ "polkadot-node-metrics",
  "polkadot-node-network-protocol",
  "polkadot-node-primitives",
- "polkadot-node-subsystem",
- "polkadot-node-subsystem-util",
+ "polkadot-node-subsystem-types",
+ "polkadot-overseer-all-subsystems-gen",
+ "polkadot-overseer-gen",
  "polkadot-primitives",
- "polkadot-procmacro-overseer-subsystems-gen",
  "sc-client-api",
  "sp-api",
  "sp-core",
  "tracing",
 ]
 
+[[package]]
+name = "polkadot-overseer-all-subsystems-gen"
+version = "0.1.0"
+dependencies = [
+ "assert_matches",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "trybuild",
+]
+
+[[package]]
+name = "polkadot-overseer-gen"
+version = "0.1.0"
+dependencies = [
+ "async-trait",
+ "futures 0.3.15",
+ "futures-timer 3.0.2",
+ "metered-channel",
+ "pin-project 1.0.7",
+ "polkadot-node-network-protocol",
+ "polkadot-overseer-gen-proc-macro",
+ "sp-core",
+ "thiserror",
+ "tracing",
+ "trybuild",
+]
+
+[[package]]
+name = "polkadot-overseer-gen-proc-macro"
+version = "0.1.0"
+dependencies = [
+ "assert_matches",
+ "proc-macro-crate 1.0.0",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "polkadot-parachain"
 version = "0.9.8"
@@ -6520,28 +6587,6 @@ dependencies = [
  "thiserror",
 ]
 
-[[package]]
-name = "polkadot-procmacro-overseer-subsystems-gen"
-version = "0.1.0"
-dependencies = [
- "assert_matches",
- "proc-macro2",
- "quote",
- "syn",
- "trybuild",
-]
-
-[[package]]
-name = "polkadot-procmacro-subsystem-dispatch-gen"
-version = "0.1.0"
-dependencies = [
- "assert_matches",
- "proc-macro2",
- "quote",
- "syn",
- "trybuild",
-]
-
 [[package]]
 name = "polkadot-rpc"
 version = "0.9.8"
@@ -7202,9 +7247,9 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.24"
+version = "1.0.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
+checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
 dependencies = [
  "unicode-xid",
 ]
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index a9cd823c8d25335fcaa66f1506550fa79e92400e..f03e91485d2f5d2bc57d8796e70437cc25301df7 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -65,14 +65,18 @@ members = [
 	"node/network/collator-protocol",
 	"node/network/gossip-support",
 	"node/overseer",
+	"node/overseer/overseer-gen",
+	"node/overseer/overseer-gen/proc-macro",
+	"node/overseer/all-subsystems-gen",
 	"node/malus",
 	"node/primitives",
 	"node/service",
 	"node/subsystem",
-	"node/subsystem/dispatch-gen",
+	"node/subsystem-types",
 	"node/subsystem-test-helpers",
 	"node/subsystem-util",
 	"node/jaeger",
+	"node/metrics",
 	"node/metered-channel",
 	"node/test/client",
 	"node/test/service",
diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index 0d23cf25959bce637d425b58db0520713adf7d3c..72a1beaeafe8c08ecf2f22c30d42556749bb0b12 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -30,8 +30,11 @@ use polkadot_node_primitives::{
 	CollationGenerationConfig, AvailableData, PoV,
 };
 use polkadot_node_subsystem::{
+	ActiveLeavesUpdate,
 	messages::{AllMessages, CollationGenerationMessage, CollatorProtocolMessage},
-	FromOverseer, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemResult,
+	SpawnedSubsystem, SubsystemContext, SubsystemResult,
+	SubsystemError, FromOverseer, OverseerSignal,
+	overseer,
 };
 use polkadot_node_subsystem_util::{
 	request_availability_cores, request_persisted_validation_data,
@@ -83,6 +86,7 @@ impl CollationGenerationSubsystem {
 	async fn run<Context>(mut self, mut ctx: Context)
 	where
 		Context: SubsystemContext<Message = CollationGenerationMessage>,
+		Context: overseer::SubsystemContext<Message = CollationGenerationMessage>,
 	{
 		// when we activate new leaves, we spawn a bunch of sub-tasks, each of which is
 		// expected to generate precisely one message. We don't want to block the main loop
@@ -114,19 +118,16 @@ impl CollationGenerationSubsystem {
 	// it should hopefully therefore be ok that it's an async function mutably borrowing self.
 	async fn handle_incoming<Context>(
 		&mut self,
-		incoming: SubsystemResult<FromOverseer<Context::Message>>,
+		incoming: SubsystemResult<FromOverseer<<Context as SubsystemContext>::Message>>,
 		ctx: &mut Context,
 		sender: &mpsc::Sender<AllMessages>,
 	) -> bool
 	where
 		Context: SubsystemContext<Message = CollationGenerationMessage>,
+		Context: overseer::SubsystemContext<Message = CollationGenerationMessage>,
 	{
-		use polkadot_node_subsystem::ActiveLeavesUpdate;
-		use polkadot_node_subsystem::FromOverseer::{Communication, Signal};
-		use polkadot_node_subsystem::OverseerSignal::{ActiveLeaves, BlockFinalized, Conclude};
-
 		match incoming {
-			Ok(Signal(ActiveLeaves(ActiveLeavesUpdate { activated, .. }))) => {
+			Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, .. }))) => {
 				// follow the procedure from the guide
 				if let Some(config) = &self.config {
 					let metrics = self.metrics.clone();
@@ -143,8 +144,8 @@ impl CollationGenerationSubsystem {
 
 				false
 			}
-			Ok(Signal(Conclude)) => true,
-			Ok(Communication {
+			Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => true,
+			Ok(FromOverseer::Communication {
 				msg: CollationGenerationMessage::Initialize(config),
 			}) => {
 				if self.config.is_some() {
@@ -154,7 +155,7 @@ impl CollationGenerationSubsystem {
 				}
 				false
 			}
-			Ok(Signal(BlockFinalized(..))) => false,
+			Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(..))) => false,
 			Err(err) => {
 				tracing::error!(
 					target: LOG_TARGET,
@@ -168,9 +169,10 @@ impl CollationGenerationSubsystem {
 	}
 }
 
-impl<Context> Subsystem<Context> for CollationGenerationSubsystem
+impl<Context> overseer::Subsystem<Context, SubsystemError> for CollationGenerationSubsystem
 where
 	Context: SubsystemContext<Message = CollationGenerationMessage>,
+	Context: overseer::SubsystemContext<Message = CollationGenerationMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = async move {
diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs
index 247b0d28d9da85f85b16c66b8a3845e3eaeeba36..93bcd69e74af5e3ec06cd0fe43c6e0fd20ee70b2 100644
--- a/polkadot/node/core/approval-voting/src/import.rs
+++ b/polkadot/node/core/approval-voting/src/import.rs
@@ -29,6 +29,7 @@
 //! We maintain a rolling window of session indices. This starts as empty
 
 use polkadot_node_subsystem::{
+	overseer,
 	messages::{
 		RuntimeApiMessage, RuntimeApiRequest, ChainApiMessage, ApprovalDistributionMessage,
 		ChainSelectionMessage,
@@ -84,7 +85,7 @@ struct ImportedBlockInfoEnv<'a> {
 // Computes information about the imported block. Returns `None` if the info couldn't be extracted -
 // failure to communicate with overseer,
 async fn imported_block_info(
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	env: ImportedBlockInfoEnv<'_>,
 	block_hash: Hash,
 	block_header: &Header,
@@ -98,7 +99,7 @@ async fn imported_block_info(
 		ctx.send_message(RuntimeApiMessage::Request(
 			block_hash,
 			RuntimeApiRequest::CandidateEvents(c_tx),
-		).into()).await;
+		)).await;
 
 		let events: Vec<CandidateEvent> = match c_rx.await {
 			Ok(Ok(events)) => events,
@@ -120,7 +121,7 @@ async fn imported_block_info(
 		ctx.send_message(RuntimeApiMessage::Request(
 			block_header.parent_hash,
 			RuntimeApiRequest::SessionIndexForChild(s_tx),
-		).into()).await;
+		)).await;
 
 		let session_index = match s_rx.await {
 			Ok(Ok(s)) => s,
@@ -161,7 +162,7 @@ async fn imported_block_info(
 		ctx.send_message(RuntimeApiMessage::Request(
 			block_hash,
 			RuntimeApiRequest::CurrentBabeEpoch(s_tx),
-		).into()).await;
+		)).await;
 
 		match s_rx.await {
 			Ok(Ok(s)) => s,
@@ -284,20 +285,21 @@ pub struct BlockImportedCandidates {
 ///   * and return information about all candidates imported under each block.
 ///
 /// It is the responsibility of the caller to schedule wakeups for each block.
-pub(crate) async fn handle_new_head<'a>(
-	ctx: &mut impl SubsystemContext,
+pub(crate) async fn handle_new_head(
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	state: &mut State,
-	db: &mut OverlayedBackend<'a, impl Backend>,
+	db: &mut OverlayedBackend<'_, impl Backend>,
 	head: Hash,
 	finalized_number: &Option<BlockNumber>,
-) -> SubsystemResult<Vec<BlockImportedCandidates>> {
+) -> SubsystemResult<Vec<BlockImportedCandidates>>
+{
 	// Update session info based on most recent head.
 
 	let mut span = jaeger::Span::new(head, "approval-checking-import");
 
 	let header = {
 		let (h_tx, h_rx) = oneshot::channel();
-		ctx.send_message(ChainApiMessage::BlockHeader(head, h_tx).into()).await;
+		ctx.send_message(ChainApiMessage::BlockHeader(head, h_tx)).await;
 
 		match h_rx.await? {
 			Err(e) => {
@@ -375,7 +377,7 @@ pub(crate) async fn handle_new_head<'a>(
 					// It's possible that we've lost a race with finality.
 					let (tx, rx) = oneshot::channel();
 					ctx.send_message(
-						ChainApiMessage::FinalizedBlockHash(block_header.number.clone(), tx).into()
+						ChainApiMessage::FinalizedBlockHash(block_header.number.clone(), tx)
 					).await;
 
 					let lost_to_finality = match rx.await {
@@ -469,7 +471,7 @@ pub(crate) async fn handle_new_head<'a>(
 
 		// If all bits are already set, then send an approve message.
 		if approved_bitfield.count_ones() == approved_bitfield.len() {
-			ctx.send_message(ChainSelectionMessage::Approved(block_hash).into()).await;
+			ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await;
 		}
 
 		let block_entry = v1::BlockEntry {
@@ -498,7 +500,7 @@ pub(crate) async fn handle_new_head<'a>(
 
 			// Notify chain-selection of all approved hashes.
 			for hash in approved_hashes {
-				ctx.send_message(ChainSelectionMessage::Approved(hash).into()).await;
+				ctx.send_message(ChainSelectionMessage::Approved(hash)).await;
 			}
 		}
 
@@ -551,7 +553,7 @@ pub(crate) async fn handle_new_head<'a>(
 		"Informing distribution of newly imported chain",
 	);
 
-	ctx.send_unbounded_message(ApprovalDistributionMessage::NewBlocks(approval_meta).into());
+	ctx.send_unbounded_message(ApprovalDistributionMessage::NewBlocks(approval_meta));
 
 	Ok(imported_candidates)
 }
diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index f471488324e366f39710c1d6f9bab50c6b37df91..aff7a8ee14994d249db9b2ac60ba74d6ccd966e6 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -29,7 +29,7 @@ use polkadot_node_subsystem::{
 		AvailabilityRecoveryMessage, ChainSelectionMessage,
 	},
 	errors::RecoveryError,
-	Subsystem, SubsystemContext, SubsystemError, SubsystemResult, SpawnedSubsystem,
+	overseer::{self, SubsystemSender as _}, SubsystemContext, SubsystemError, SubsystemResult, SpawnedSubsystem,
 	FromOverseer, OverseerSignal, SubsystemSender,
 };
 use polkadot_node_subsystem_util::{
@@ -333,12 +333,15 @@ impl ApprovalVotingSubsystem {
 	}
 }
 
-impl<C> Subsystem<C> for ApprovalVotingSubsystem
-	where C: SubsystemContext<Message = ApprovalVotingMessage>
+impl<Context> overseer::Subsystem<Context, SubsystemError> for ApprovalVotingSubsystem
+where
+	Context: SubsystemContext<Message = ApprovalVotingMessage>,
+	Context: overseer::SubsystemContext<Message = ApprovalVotingMessage>,
 {
-	fn start(self, ctx: C) -> SpawnedSubsystem {
+
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let backend = DbBackend::new(self.db.clone(), self.db_config);
-		let future = run::<DbBackend, C>(
+		let future = run::<DbBackend, Context>(
 			ctx,
 			self,
 			Box::new(SystemClock),
@@ -663,15 +666,16 @@ enum Action {
 	Conclude,
 }
 
-async fn run<B, C>(
-	mut ctx: C,
+async fn run<B, Context>(
+	mut ctx: Context,
 	mut subsystem: ApprovalVotingSubsystem,
 	clock: Box<dyn Clock + Send + Sync>,
 	assignment_criteria: Box<dyn AssignmentCriteria + Send + Sync>,
 	mut backend: B,
 ) -> SubsystemResult<()>
 	where
-		C: SubsystemContext<Message = ApprovalVotingMessage>,
+		Context: SubsystemContext<Message = ApprovalVotingMessage>,
+		Context: overseer::SubsystemContext<Message = ApprovalVotingMessage>,
 		B: Backend,
 {
 	let mut state = State {
@@ -797,7 +801,7 @@ async fn run<B, C>(
 //
 // returns `true` if any of the actions was a `Conclude` command.
 async fn handle_actions(
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext<Message = ApprovalVotingMessage> + overseer::SubsystemContext<Message = ApprovalVotingMessage>),
 	state: &mut State,
 	overlayed_db: &mut OverlayedBackend<'_, impl Backend>,
 	metrics: &Metrics,
@@ -861,7 +865,7 @@ async fn handle_actions(
 				ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeAssignment(
 					indirect_cert,
 					candidate_index,
-				).into());
+				));
 
 				match approvals_cache.get(&candidate_hash) {
 					Some(ApprovalOutcome::Approved) => {
@@ -902,14 +906,14 @@ async fn handle_actions(
 				}
 			}
 			Action::NoteApprovedInChainSelection(block_hash) => {
-				ctx.send_message(ChainSelectionMessage::Approved(block_hash).into()).await;
+				ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await;
 			}
 			Action::BecomeActive => {
 				*mode = Mode::Active;
 
 				let messages = distribution_messages_for_activation(overlayed_db)?;
 
-				ctx.send_messages(messages.into_iter().map(Into::into)).await;
+				ctx.send_messages(messages.into_iter()).await;
 			}
 			Action::Conclude => { conclude = true; }
 		}
@@ -1017,7 +1021,7 @@ fn distribution_messages_for_activation(
 
 // Handle an incoming signal from the overseer. Returns true if execution should conclude.
 async fn handle_from_overseer(
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext<Message = ApprovalVotingMessage> + overseer::SubsystemContext<Message = ApprovalVotingMessage>),
 	state: &mut State,
 	db: &mut OverlayedBackend<'_, impl Backend>,
 	metrics: &Metrics,
@@ -1130,7 +1134,7 @@ async fn handle_from_overseer(
 }
 
 async fn handle_approved_ancestor(
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	db: &OverlayedBackend<'_, impl Backend>,
 	target: Hash,
 	lower_bound: BlockNumber,
@@ -1149,7 +1153,7 @@ async fn handle_approved_ancestor(
 	let target_number = {
 		let (tx, rx) = oneshot::channel();
 
-		ctx.send_message(ChainApiMessage::BlockNumber(target, tx).into()).await;
+		ctx.send_message(ChainApiMessage::BlockNumber(target, tx)).await;
 
 		match rx.await {
 			Ok(Ok(Some(n))) => n,
@@ -1173,7 +1177,7 @@ async fn handle_approved_ancestor(
 			hash: target,
 			k: (target_number - (lower_bound + 1)) as usize,
 			response_channel: tx,
-		}.into()).await;
+		}).await;
 
 		match rx.await {
 			Ok(Ok(a)) => a,
@@ -1994,7 +1998,7 @@ fn process_wakeup(
 // spawned. When the background work is no longer needed, the `AbortHandle` should be dropped
 // to cancel the background work and any requests it has spawned.
 async fn launch_approval(
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext<Message = ApprovalVotingMessage> + overseer::SubsystemContext<Message = ApprovalVotingMessage>),
 	metrics: Metrics,
 	session_index: SessionIndex,
 	candidate: CandidateReceipt,
@@ -2043,7 +2047,7 @@ async fn launch_approval(
 		session_index,
 		Some(backing_group),
 		a_tx,
-	).into()).await;
+	)).await;
 
 	ctx.send_message(
 		RuntimeApiMessage::Request(
@@ -2052,7 +2056,7 @@ async fn launch_approval(
 				candidate.descriptor.validation_code_hash,
 				code_tx,
 			),
-		).into()
+		)
 	).await;
 
 	let candidate = candidate.clone();
diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs
index 4c646f5e2d5b2629051a6aebc4ebec8f2b25c6d6..0160e87c434e9e8bc40f77d045bf61fc85d1e604 100644
--- a/polkadot/node/core/av-store/src/lib.rs
+++ b/polkadot/node/core/av-store/src/lib.rs
@@ -37,7 +37,9 @@ use polkadot_node_primitives::{
 	ErasureChunk, AvailableData,
 };
 use polkadot_subsystem::{
-	FromOverseer, OverseerSignal, SubsystemError, Subsystem, SubsystemContext, SpawnedSubsystem,
+	FromOverseer, OverseerSignal, SubsystemError,
+	SubsystemContext, SpawnedSubsystem,
+	overseer,
 	ActiveLeavesUpdate,
 	errors::{ChainApiError, RuntimeApiError},
 };
@@ -522,9 +524,10 @@ impl KnownUnfinalizedBlocks {
 	}
 }
 
-impl<Context> Subsystem<Context> for AvailabilityStoreSubsystem
+impl<Context> overseer::Subsystem<Context, SubsystemError> for AvailabilityStoreSubsystem
 where
 	Context: SubsystemContext<Message = AvailabilityStoreMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = run(self, ctx)
@@ -540,7 +543,8 @@ where
 
 async fn run<Context>(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context)
 where
-	Context: SubsystemContext<Message=AvailabilityStoreMessage>,
+	Context: SubsystemContext<Message = AvailabilityStoreMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
 {
 	let mut next_pruning = Delay::new(subsystem.pruning_config.pruning_interval).fuse();
 
@@ -570,7 +574,8 @@ async fn run_iteration<Context>(
 )
 	-> Result<bool, Error>
 where
-	Context: SubsystemContext<Message=AvailabilityStoreMessage>,
+	Context: SubsystemContext<Message = AvailabilityStoreMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
 {
 	select! {
 		incoming = ctx.recv().fuse() => {
@@ -615,18 +620,22 @@ where
 	Ok(false)
 }
 
-async fn process_block_activated(
-	ctx: &mut impl SubsystemContext,
+async fn process_block_activated<Context>(
+	ctx: &mut Context,
 	subsystem: &mut AvailabilityStoreSubsystem,
 	activated: Hash,
-) -> Result<(), Error> {
+) -> Result<(), Error>
+where
+	Context: SubsystemContext<Message = AvailabilityStoreMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
+{
 	let now = subsystem.clock.now()?;
 
 	let block_header = {
 		let (tx, rx) = oneshot::channel();
 
 		ctx.send_message(
-			ChainApiMessage::BlockHeader(activated, tx).into()
+			ChainApiMessage::BlockHeader(activated, tx)
 		).await;
 
 		match rx.await?? {
@@ -666,8 +675,8 @@ async fn process_block_activated(
 	Ok(())
 }
 
-async fn process_new_head(
-	ctx: &mut impl SubsystemContext,
+async fn process_new_head<Context>(
+	ctx: &mut Context,
 	db: &Arc<dyn KeyValueDB>,
 	db_transaction: &mut DBTransaction,
 	config: &Config,
@@ -675,12 +684,16 @@ async fn process_new_head(
 	now: Duration,
 	hash: Hash,
 	header: Header,
-) -> Result<(), Error> {
+) -> Result<(), Error>
+where
+	Context: SubsystemContext<Message = AvailabilityStoreMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
+{
 
 	let candidate_events = {
 		let (tx, rx) = oneshot::channel();
 		ctx.send_message(
-			RuntimeApiMessage::Request(hash, RuntimeApiRequest::CandidateEvents(tx)).into()
+			RuntimeApiMessage::Request(hash, RuntimeApiRequest::CandidateEvents(tx))
 		).await;
 
 		rx.await??
@@ -691,7 +704,7 @@ async fn process_new_head(
 	let n_validators = {
 		let (tx, rx) = oneshot::channel();
 		ctx.send_message(
-			RuntimeApiMessage::Request(header.parent_hash, RuntimeApiRequest::Validators(tx)).into()
+			RuntimeApiMessage::Request(header.parent_hash, RuntimeApiRequest::Validators(tx))
 		).await;
 
 		rx.await??.len()
@@ -835,12 +848,16 @@ macro_rules! peek_num {
 	}
 }
 
-async fn process_block_finalized(
-	ctx: &mut impl SubsystemContext,
+async fn process_block_finalized<Context>(
+	ctx: &mut Context,
 	subsystem: &AvailabilityStoreSubsystem,
 	finalized_hash: Hash,
 	finalized_number: BlockNumber,
-) -> Result<(), Error> {
+) -> Result<(), Error>
+where
+	Context: SubsystemContext<Message = AvailabilityStoreMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
+{
 	let now = subsystem.clock.now()?;
 
 	let mut next_possible_batch = 0;
@@ -869,7 +886,7 @@ async fn process_block_finalized(
 			finalized_hash
 		} else {
 			let (tx, rx) = oneshot::channel();
-			ctx.send_message(ChainApiMessage::FinalizedBlockHash(batch_num, tx).into()).await;
+			ctx.send_message(ChainApiMessage::FinalizedBlockHash(batch_num, tx)).await;
 
 			match rx.await?? {
 				None => {
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs
index 7fccfdd419aeb578e42256833e0c348aee477870..fa7b0bb3ee3da7b6b11e722006aab1cb95f1c204 100644
--- a/polkadot/node/core/backing/src/lib.rs
+++ b/polkadot/node/core/backing/src/lib.rs
@@ -37,6 +37,7 @@ use polkadot_node_primitives::{
 use polkadot_subsystem::{
 	PerLeafSpan, Stage, SubsystemSender,
 	jaeger,
+	overseer,
 	messages::{
 		AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage,
 		CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage,
@@ -308,7 +309,7 @@ async fn store_available_data(
 		n_validators,
 		available_data,
 		tx,
-	).into()).await;
+	)).await;
 
 	let _ = rx.await.map_err(Error::StoreAvailableData)?;
 
@@ -384,7 +385,7 @@ async fn request_pov(
 		candidate_hash,
 		pov_hash,
 		tx,
-	}.into()).await;
+	}).await;
 
 	let pov = rx.await.map_err(|_| Error::FetchPoV)?;
 	Ok(Arc::new(pov))
@@ -397,13 +398,12 @@ async fn request_candidate_validation(
 ) -> Result<ValidationResult, Error> {
 	let (tx, rx) = oneshot::channel();
 
-	sender.send_message(AllMessages::CandidateValidation(
-			CandidateValidationMessage::ValidateFromChainState(
-				candidate,
-				pov,
-				tx,
-			)
-		).into()
+	sender.send_message(
+		CandidateValidationMessage::ValidateFromChainState(
+			candidate,
+			pov,
+			tx,
+		)
 	).await;
 
 	match rx.await {
@@ -415,7 +415,7 @@ async fn request_candidate_validation(
 
 type BackgroundValidationResult = Result<(CandidateReceipt, CandidateCommitments, Arc<PoV>), CandidateReceipt>;
 
-struct BackgroundValidationParams<S, F> {
+struct BackgroundValidationParams<S: overseer::SubsystemSender<AllMessages>, F> {
 	sender: JobSender<S>,
 	tx_command: mpsc::Sender<ValidatedCandidateCommand>,
 	candidate: CandidateReceipt,
@@ -600,14 +600,14 @@ impl CandidateBackingJob {
 								root_span,
 							).await? {
 								sender.send_message(
-									CollatorProtocolMessage::Seconded(self.parent, stmt).into()
+									CollatorProtocolMessage::Seconded(self.parent, stmt)
 								).await;
 							}
 						}
 					}
 					Err(candidate) => {
 						sender.send_message(
-							CollatorProtocolMessage::Invalid(self.parent, candidate).into()
+							CollatorProtocolMessage::Invalid(self.parent, candidate)
 						).await;
 					}
 				}
@@ -683,7 +683,7 @@ impl CandidateBackingJob {
 			.map_or(false, |c| c != &candidate.descriptor().collator)
 		{
 			sender.send_message(
-				CollatorProtocolMessage::Invalid(self.parent, candidate.clone()).into()
+				CollatorProtocolMessage::Invalid(self.parent, candidate.clone())
 			).await;
 			return Ok(());
 		}
@@ -732,7 +732,7 @@ impl CandidateBackingJob {
 		if let Some(signed_statement) = self.sign_statement(statement).await {
 			self.import_statement(sender, &signed_statement, root_span).await?;
 			let smsg = StatementDistributionMessage::Share(self.parent, signed_statement.clone());
-			sender.send_unbounded_message(smsg.into());
+			sender.send_unbounded_message(smsg);
 
 			Ok(Some(signed_statement))
 		} else {
@@ -749,7 +749,7 @@ impl CandidateBackingJob {
 				ProvisionerMessage::ProvisionableData(
 					self.parent,
 					ProvisionableData::MisbehaviorReport(self.parent, validator_id, report)
-				).into()
+				)
 			).await;
 		}
 	}
@@ -801,7 +801,7 @@ impl CandidateBackingJob {
 						self.parent,
 						ProvisionableData::BackedCandidate(backed.receipt()),
 					);
-					sender.send_message(message.into()).await;
+					sender.send_message(message).await;
 
 					span.as_ref().map(|s| s.child("backed"));
 					span
diff --git a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs
index a36b2d8baa0bc50d5d5414fda8594b11467c30dc..8da711da649a061f04bda4e047a14eea57a2f2eb 100644
--- a/polkadot/node/core/bitfield-signing/src/lib.rs
+++ b/polkadot/node/core/bitfield-signing/src/lib.rs
@@ -38,10 +38,14 @@ use polkadot_primitives::v1::{AvailabilityBitfield, CoreState, Hash, ValidatorIn
 use std::{pin::Pin, time::Duration, iter::FromIterator, sync::Arc};
 use wasm_timer::{Delay, Instant};
 
+#[cfg(test)]
+mod tests;
+
 /// Delay between starting a bitfield signing job and its attempting to create a bitfield.
 const JOB_DELAY: Duration = Duration::from_millis(1500);
 const LOG_TARGET: &str = "parachain::bitfield-signing";
 
+
 /// Each `BitfieldSigningJob` prepares a signed bitfield for a single relay parent.
 pub struct BitfieldSigningJob;
 
@@ -299,7 +303,7 @@ impl JobTrait for BitfieldSigningJob {
 				.send_message(BitfieldDistributionMessage::DistributeBitfield(
 					relay_parent,
 					signed_bitfield,
-				).into())
+				))
 				.await;
 
 			Ok(())
@@ -310,74 +314,3 @@ impl JobTrait for BitfieldSigningJob {
 
 /// BitfieldSigningSubsystem manages a number of bitfield signing jobs.
 pub type BitfieldSigningSubsystem<Spawner> = JobSubsystem<BitfieldSigningJob, Spawner>;
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use futures::{pin_mut, executor::block_on};
-	use polkadot_primitives::v1::{CandidateHash, OccupiedCore};
-	use polkadot_node_subsystem::messages::AllMessages;
-
-	fn occupied_core(para_id: u32, candidate_hash: CandidateHash) -> CoreState {
-		CoreState::Occupied(OccupiedCore {
-			group_responsible: para_id.into(),
-			next_up_on_available: None,
-			occupied_since: 100_u32,
-			time_out_at: 200_u32,
-			next_up_on_time_out: None,
-			availability: Default::default(),
-			candidate_hash,
-			candidate_descriptor: Default::default(),
-		})
-	}
-
-	#[test]
-	fn construct_availability_bitfield_works() {
-		block_on(async move {
-			let relay_parent = Hash::default();
-			let validator_index = ValidatorIndex(1u32);
-
-			let (mut sender, mut receiver) = polkadot_node_subsystem_test_helpers::sender_receiver();
-			let future = construct_availability_bitfield(
-				relay_parent,
-				&jaeger::Span::Disabled,
-				validator_index,
-				&mut sender,
-			).fuse();
-			pin_mut!(future);
-
-			let hash_a = CandidateHash(Hash::repeat_byte(1));
-			let hash_b = CandidateHash(Hash::repeat_byte(2));
-
-			loop {
-				futures::select! {
-					m = receiver.next() => match m.unwrap() {
-						AllMessages::RuntimeApi(
-							RuntimeApiMessage::Request(rp, RuntimeApiRequest::AvailabilityCores(tx)),
-						) => {
-							assert_eq!(relay_parent, rp);
-							tx.send(Ok(vec![CoreState::Free, occupied_core(1, hash_a), occupied_core(2, hash_b)])).unwrap();
-						}
-						AllMessages::AvailabilityStore(
-							AvailabilityStoreMessage::QueryChunkAvailability(c_hash, vidx, tx),
-						) => {
-							assert_eq!(validator_index, vidx);
-
-							tx.send(c_hash == hash_a).unwrap();
-						},
-						o => panic!("Unknown message: {:?}", o),
-					},
-					r = future => match r {
-						Ok(r) => {
-							assert!(!r.0.get(0).unwrap());
-							assert!(r.0.get(1).unwrap());
-							assert!(!r.0.get(2).unwrap());
-							break
-						},
-						Err(e) => panic!("Failed: {:?}", e),
-					},
-				}
-			}
-		});
-	}
-}
diff --git a/polkadot/node/core/bitfield-signing/src/tests.rs b/polkadot/node/core/bitfield-signing/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a5f8e564599faccb13f694b962dd2f4149839efd
--- /dev/null
+++ b/polkadot/node/core/bitfield-signing/src/tests.rs
@@ -0,0 +1,83 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use super::*;
+use futures::{pin_mut, executor::block_on};
+use polkadot_primitives::v1::{CandidateHash, OccupiedCore};
+use polkadot_node_subsystem::messages::AllMessages;
+
+fn occupied_core(para_id: u32, candidate_hash: CandidateHash) -> CoreState {
+	CoreState::Occupied(OccupiedCore {
+		group_responsible: para_id.into(),
+		next_up_on_available: None,
+		occupied_since: 100_u32,
+		time_out_at: 200_u32,
+		next_up_on_time_out: None,
+		availability: Default::default(),
+		candidate_hash,
+		candidate_descriptor: Default::default(),
+	})
+}
+
+#[test]
+fn construct_availability_bitfield_works() {
+	block_on(async move {
+		let relay_parent = Hash::default();
+		let validator_index = ValidatorIndex(1u32);
+
+		let (mut sender, mut receiver) = polkadot_node_subsystem_test_helpers::sender_receiver();
+		let future = construct_availability_bitfield(
+			relay_parent,
+			&jaeger::Span::Disabled,
+			validator_index,
+			&mut sender,
+		).fuse();
+		pin_mut!(future);
+
+		let hash_a = CandidateHash(Hash::repeat_byte(1));
+		let hash_b = CandidateHash(Hash::repeat_byte(2));
+
+		loop {
+			futures::select! {
+				m = receiver.next() => match m.unwrap() {
+					AllMessages::RuntimeApi(
+						RuntimeApiMessage::Request(rp, RuntimeApiRequest::AvailabilityCores(tx)),
+					) => {
+						assert_eq!(relay_parent, rp);
+						tx.send(Ok(vec![CoreState::Free, occupied_core(1, hash_a), occupied_core(2, hash_b)])).unwrap();
+					}
+					AllMessages::AvailabilityStore(
+						AvailabilityStoreMessage::QueryChunkAvailability(c_hash, vidx, tx),
+					) => {
+						assert_eq!(validator_index, vidx);
+
+						tx.send(c_hash == hash_a).unwrap();
+					},
+					o => panic!("Unknown message: {:?}", o),
+				},
+				r = future => match r {
+					Ok(r) => {
+						assert!(!r.0.get(0).unwrap());
+						assert!(r.0.get(1).unwrap());
+						assert!(!r.0.get(2).unwrap());
+						break
+					},
+					Err(e) => panic!("Failed: {:?}", e),
+				},
+			}
+		}
+	});
+}
diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml
index a109cb590510af867214c7ba324793151d8ee842..d9ccae79bd854ebab9ff5bb597867949a1bccd61 100644
--- a/polkadot/node/core/candidate-validation/Cargo.toml
+++ b/polkadot/node/core/candidate-validation/Cargo.toml
@@ -15,7 +15,7 @@ parity-scale-codec = { version = "2.0.0", default-features = false, features = [
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-parachain = { path = "../../../parachain" }
 polkadot-node-primitives = { path = "../../primitives" }
-polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+polkadot-node-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 
 [target.'cfg(not(any(target_os = "android", target_os = "unknown")))'.dependencies]
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index aabdf55517ede3444941fb5e7e7c292a98cf3075..9b22d82a37044c1360ade46d98764357cab9f818 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -23,16 +23,17 @@
 #![deny(unused_crate_dependencies, unused_results)]
 #![warn(missing_docs)]
 
-use polkadot_subsystem::{
-	Subsystem, SubsystemContext, SpawnedSubsystem, SubsystemResult, SubsystemError,
+use polkadot_node_subsystem::{
+	overseer,
+	SubsystemContext, SpawnedSubsystem, SubsystemResult, SubsystemError,
 	FromOverseer, OverseerSignal,
 	messages::{
-		AllMessages, CandidateValidationMessage, RuntimeApiMessage,
+		CandidateValidationMessage, RuntimeApiMessage,
 		ValidationFailed, RuntimeApiRequest,
 	},
+	errors::RuntimeApiError,
 };
 use polkadot_node_subsystem_util::metrics::{self, prometheus};
-use polkadot_subsystem::errors::RuntimeApiError;
 use polkadot_node_primitives::{
 	VALIDATION_CODE_BOMB_LIMIT, POV_BOMB_LIMIT, ValidationResult, InvalidCandidate, PoV, BlockData,
 };
@@ -84,10 +85,12 @@ impl CandidateValidationSubsystem {
 	}
 }
 
-impl<C> Subsystem<C> for CandidateValidationSubsystem where
-	C: SubsystemContext<Message = CandidateValidationMessage>,
+impl<Context> overseer::Subsystem<Context, SubsystemError> for CandidateValidationSubsystem
+where
+	Context: SubsystemContext<Message = CandidateValidationMessage>,
+	Context: overseer::SubsystemContext<Message = CandidateValidationMessage>,
 {
-	fn start(self, ctx: C) -> SpawnedSubsystem {
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = run(ctx, self.metrics, self.config.artifacts_cache_path, self.config.program_path)
 			.map_err(|e| SubsystemError::with_origin("candidate-validation", e))
 			.boxed();
@@ -98,12 +101,16 @@ impl<C> Subsystem<C> for CandidateValidationSubsystem where
 	}
 }
 
-async fn run(
-	mut ctx: impl SubsystemContext<Message = CandidateValidationMessage>,
+async fn run<Context>(
+	mut ctx: Context,
 	metrics: Metrics,
 	cache_path: PathBuf,
 	program_path: PathBuf,
-) -> SubsystemResult<()> {
+) -> SubsystemResult<()>
+where
+	Context: SubsystemContext<Message = CandidateValidationMessage>,
+	Context: overseer::SubsystemContext<Message = CandidateValidationMessage>,
+{
 	let (mut validation_host, task) = polkadot_node_core_pvf::start(
 		polkadot_node_core_pvf::Config::new(cache_path, program_path),
 	);
@@ -174,17 +181,21 @@ async fn run(
 	}
 }
 
-async fn runtime_api_request<T>(
-	ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
+async fn runtime_api_request<T, Context>(
+	ctx: &mut Context,
 	relay_parent: Hash,
 	request: RuntimeApiRequest,
 	receiver: oneshot::Receiver<Result<T, RuntimeApiError>>,
-) -> SubsystemResult<Result<T, RuntimeApiError>> {
+) -> SubsystemResult<Result<T, RuntimeApiError>>
+where
+	Context: SubsystemContext<Message = CandidateValidationMessage>,
+	Context: overseer::SubsystemContext<Message = CandidateValidationMessage>,
+{
 	ctx.send_message(
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+		RuntimeApiMessage::Request(
 			relay_parent,
 			request,
-		))
+		)
 	).await;
 
 	receiver.await.map_err(Into::into)
@@ -197,11 +208,15 @@ enum AssumptionCheckOutcome {
 	BadRequest,
 }
 
-async fn check_assumption_validation_data(
-	ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
+async fn check_assumption_validation_data<Context>(
+	ctx: &mut Context,
 	descriptor: &CandidateDescriptor,
 	assumption: OccupiedCoreAssumption,
-) -> SubsystemResult<AssumptionCheckOutcome> {
+) -> SubsystemResult<AssumptionCheckOutcome>
+where
+	Context: SubsystemContext<Message = CandidateValidationMessage>,
+	Context: overseer::SubsystemContext<Message = CandidateValidationMessage>,
+{
 	let validation_data = {
 		let (tx, rx) = oneshot::channel();
 		let d = runtime_api_request(
@@ -247,10 +262,14 @@ async fn check_assumption_validation_data(
 	})
 }
 
-async fn find_assumed_validation_data(
-	ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
+async fn find_assumed_validation_data<Context>(
+	ctx: &mut Context,
 	descriptor: &CandidateDescriptor,
-) -> SubsystemResult<AssumptionCheckOutcome> {
+) -> SubsystemResult<AssumptionCheckOutcome>
+where
+	Context: SubsystemContext<Message = CandidateValidationMessage>,
+	Context: overseer::SubsystemContext<Message = CandidateValidationMessage>,
+{
 	// The candidate descriptor has a `persisted_validation_data_hash` which corresponds to
 	// one of up to two possible values that we can derive from the state of the
 	// relay-parent. We can fetch these values by getting the persisted validation data
@@ -278,13 +297,17 @@ async fn find_assumed_validation_data(
 	Ok(AssumptionCheckOutcome::DoesNotMatch)
 }
 
-async fn spawn_validate_from_chain_state(
-	ctx: &mut impl SubsystemContext<Message = CandidateValidationMessage>,
+async fn spawn_validate_from_chain_state<Context>(
+	ctx: &mut Context,
 	validation_host: &mut ValidationHost,
 	descriptor: CandidateDescriptor,
 	pov: Arc<PoV>,
 	metrics: &Metrics,
-) -> SubsystemResult<Result<ValidationResult, ValidationFailed>> {
+) -> SubsystemResult<Result<ValidationResult, ValidationFailed>>
+where
+	Context: SubsystemContext<Message = CandidateValidationMessage>,
+	Context: overseer::SubsystemContext<Message = CandidateValidationMessage>,
+{
 	let (validation_data, validation_code) =
 		match find_assumed_validation_data(ctx, &descriptor).await? {
 			AssumptionCheckOutcome::Matches(validation_data, validation_code) => {
diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs
index 26ccdbb7cc9e828ed3195041e8999dd3fc1c85c2..c718122a63e7f1594bc14b40ce252e08183faeed 100644
--- a/polkadot/node/core/candidate-validation/src/tests.rs
+++ b/polkadot/node/core/candidate-validation/src/tests.rs
@@ -15,6 +15,7 @@
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
 use super::*;
+use polkadot_node_subsystem::messages::AllMessages;
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_primitives::v1::{HeadData, UpwardMessage};
 use sp_core::testing::TaskExecutor;
diff --git a/polkadot/node/core/chain-api/src/lib.rs b/polkadot/node/core/chain-api/src/lib.rs
index b7c152686afa5397c5b08b7fcd218ebc4debc8b6..264a27644d1fb0ebe558be103e3153f88a7ccca9 100644
--- a/polkadot/node/core/chain-api/src/lib.rs
+++ b/polkadot/node/core/chain-api/src/lib.rs
@@ -40,10 +40,15 @@ use sp_blockchain::HeaderBackend;
 use polkadot_node_subsystem_util::metrics::{self, prometheus};
 use polkadot_primitives::v1::{Block, BlockId};
 use polkadot_subsystem::{
-	messages::ChainApiMessage, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem,
+	overseer,
+	messages::ChainApiMessage,
+	FromOverseer, OverseerSignal, SpawnedSubsystem,
 	SubsystemContext, SubsystemError, SubsystemResult,
 };
 
+#[cfg(test)]
+mod tests;
+
 const LOG_TARGET: &str = "parachain::chain-api";
 
 /// The Chain API Subsystem implementation.
@@ -62,13 +67,14 @@ impl<Client> ChainApiSubsystem<Client> {
 	}
 }
 
-impl<Client, Context> Subsystem<Context> for ChainApiSubsystem<Client>
+impl<Client, Context> overseer::Subsystem<Context, SubsystemError> for ChainApiSubsystem<Client>
 where
 	Client: HeaderBackend<Block> + AuxStore + 'static,
 	Context: SubsystemContext<Message = ChainApiMessage>,
+	Context: overseer::SubsystemContext<Message = ChainApiMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
-		let future = run(ctx, self)
+		let future = run::<Client, Context>(ctx, self)
 			.map_err(|e| SubsystemError::with_origin("chain-api", e))
 			.boxed();
 		SpawnedSubsystem {
@@ -78,12 +84,14 @@ where
 	}
 }
 
-async fn run<Client>(
-	mut ctx: impl SubsystemContext<Message = ChainApiMessage>,
+async fn run<Client, Context>(
+	mut ctx: Context,
 	subsystem: ChainApiSubsystem<Client>,
 ) -> SubsystemResult<()>
 where
 	Client: HeaderBackend<Block> + AuxStore,
+	Context: SubsystemContext<Message = ChainApiMessage>,
+	Context: overseer::SubsystemContext<Message = ChainApiMessage>,
 {
 	loop {
 		match ctx.recv().await? {
@@ -291,311 +299,3 @@ impl metrics::Metrics for Metrics {
 		Ok(Metrics(Some(metrics)))
 	}
 }
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	use std::collections::BTreeMap;
-	use futures::{future::BoxFuture, channel::oneshot};
-	use parity_scale_codec::Encode;
-
-	use polkadot_primitives::v1::{Hash, BlockNumber, BlockId, Header};
-	use polkadot_node_primitives::BlockWeight;
-	use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle};
-	use sp_blockchain::Info as BlockInfo;
-	use sp_core::testing::TaskExecutor;
-
-	#[derive(Clone)]
-	struct TestClient {
-		blocks: BTreeMap<Hash, BlockNumber>,
-		block_weights: BTreeMap<Hash, BlockWeight>,
-		finalized_blocks: BTreeMap<BlockNumber, Hash>,
-		headers: BTreeMap<Hash, Header>,
-	}
-
-	const ONE: Hash = Hash::repeat_byte(0x01);
-	const TWO: Hash = Hash::repeat_byte(0x02);
-	const THREE: Hash = Hash::repeat_byte(0x03);
-	const FOUR: Hash = Hash::repeat_byte(0x04);
-	const ERROR_PATH: Hash = Hash::repeat_byte(0xFF);
-
-	fn default_header() -> Header {
-		Header {
-			parent_hash: Hash::zero(),
-			number: 100500,
-			state_root: Hash::zero(),
-			extrinsics_root: Hash::zero(),
-			digest: Default::default(),
-		}
-	}
-
-	impl Default for TestClient {
-		fn default() -> Self {
-			Self {
-				blocks: maplit::btreemap! {
-					ONE => 1,
-					TWO => 2,
-					THREE => 3,
-					FOUR => 4,
-				},
-				block_weights: maplit::btreemap! {
-					ONE => 0,
-					TWO => 1,
-					THREE => 1,
-					FOUR => 2,
-				},
-				finalized_blocks: maplit::btreemap! {
-					1 => ONE,
-					3 => THREE,
-				},
-				headers: maplit::btreemap! {
-					TWO => Header {
-						parent_hash: ONE,
-						number: 2,
-						..default_header()
-					},
-					THREE => Header {
-						parent_hash: TWO,
-						number: 3,
-						..default_header()
-					},
-					FOUR => Header {
-						parent_hash: THREE,
-						number: 4,
-						..default_header()
-					},
-					ERROR_PATH => Header {
-						..default_header()
-					}
-				},
-			}
-		}
-	}
-
-	fn last_key_value<K: Clone, V: Clone>(map: &BTreeMap<K, V>) -> (K, V) {
-		assert!(!map.is_empty());
-		map.iter()
-			.last()
-			.map(|(k, v)| (k.clone(), v.clone()))
-			.unwrap()
-	}
-
-	impl HeaderBackend<Block> for TestClient {
-		fn info(&self) -> BlockInfo<Block> {
-			let genesis_hash = self.blocks.iter().next().map(|(h, _)| *h).unwrap();
-			let (best_hash, best_number) = last_key_value(&self.blocks);
-			let (finalized_number, finalized_hash) = last_key_value(&self.finalized_blocks);
-
-			BlockInfo {
-				best_hash,
-				best_number,
-				genesis_hash,
-				finalized_hash,
-				finalized_number,
-				number_leaves: 0,
-				finalized_state: None,
-			}
-		}
-		fn number(&self, hash: Hash) -> sp_blockchain::Result<Option<BlockNumber>> {
-			Ok(self.blocks.get(&hash).copied())
-		}
-		fn hash(&self, number: BlockNumber) -> sp_blockchain::Result<Option<Hash>> {
-			Ok(self.finalized_blocks.get(&number).copied())
-		}
-		fn header(&self, id: BlockId) -> sp_blockchain::Result<Option<Header>> {
-			match id {
-				// for error path testing
-				BlockId::Hash(hash) if hash.is_zero()  => {
-					Err(sp_blockchain::Error::Backend("Zero hashes are illegal!".into()))
-				}
-				BlockId::Hash(hash) => {
-					Ok(self.headers.get(&hash).cloned())
-				}
-				_ => unreachable!(),
-			}
-		}
-		fn status(&self, _id: BlockId) -> sp_blockchain::Result<sp_blockchain::BlockStatus> {
-			unimplemented!()
-		}
-	}
-
-	fn test_harness(
-		test: impl FnOnce(Arc<TestClient>, TestSubsystemContextHandle<ChainApiMessage>)
-			-> BoxFuture<'static, ()>,
-	) {
-		let (ctx, ctx_handle) = make_subsystem_context(TaskExecutor::new());
-		let client = Arc::new(TestClient::default());
-
-		let subsystem = ChainApiSubsystem::new(client.clone(), Metrics(None));
-		let chain_api_task = run(ctx, subsystem).map(|x| x.unwrap());
-		let test_task = test(client, ctx_handle);
-
-		futures::executor::block_on(future::join(chain_api_task, test_task));
-	}
-
-	impl AuxStore for TestClient {
-		fn insert_aux<
-			'a,
-			'b: 'a,
-			'c: 'a,
-			I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
-			D: IntoIterator<Item = &'a &'b [u8]>,
-		>(
-			&self,
-			_insert: I,
-			_delete: D,
-		) -> sp_blockchain::Result<()> {
-			unimplemented!()
-		}
-
-		fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
-			Ok(self
-				.block_weights
-				.iter()
-				.find(|(hash, _)| sc_consensus_babe::aux_schema::block_weight_key(hash) == key)
-				.map(|(_, weight)| weight.encode()))
-		}
-	}
-
-	#[test]
-	fn request_block_number() {
-		test_harness(|client, mut sender| {
-			async move {
-				let zero = Hash::zero();
-				let test_cases = [
-					(TWO, client.number(TWO).unwrap()),
-					(zero, client.number(zero).unwrap()), // not here
-				];
-				for (hash, expected) in &test_cases {
-					let (tx, rx) = oneshot::channel();
-
-					sender.send(FromOverseer::Communication {
-						msg: ChainApiMessage::BlockNumber(*hash, tx),
-					}).await;
-
-					assert_eq!(rx.await.unwrap().unwrap(), *expected);
-				}
-
-				sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-			}.boxed()
-		})
-	}
-
-	#[test]
-	fn request_block_header() {
-		test_harness(|client, mut sender| {
-			async move {
-				const NOT_HERE: Hash = Hash::repeat_byte(0x5);
-				let test_cases = [
-					(TWO, client.header(BlockId::Hash(TWO)).unwrap()),
-					(NOT_HERE, client.header(BlockId::Hash(NOT_HERE)).unwrap()),
-				];
-				for (hash, expected) in &test_cases {
-					let (tx, rx) = oneshot::channel();
-
-					sender.send(FromOverseer::Communication {
-						msg: ChainApiMessage::BlockHeader(*hash, tx),
-					}).await;
-
-					assert_eq!(rx.await.unwrap().unwrap(), *expected);
-				}
-
-				sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-			}.boxed()
-		})
-	}
-
-	#[test]
-	fn request_block_weight() {
-		test_harness(|client, mut sender| {
-			async move {
-				const NOT_HERE: Hash = Hash::repeat_byte(0x5);
-				let test_cases = [
-					(TWO, sc_consensus_babe::block_weight(&*client, TWO).unwrap()),
-					(FOUR, sc_consensus_babe::block_weight(&*client, FOUR).unwrap()),
-					(NOT_HERE, sc_consensus_babe::block_weight(&*client, NOT_HERE).unwrap()),
-				];
-				for (hash, expected) in &test_cases {
-					let (tx, rx) = oneshot::channel();
-
-					sender.send(FromOverseer::Communication {
-						msg: ChainApiMessage::BlockWeight(*hash, tx),
-					}).await;
-
-					assert_eq!(rx.await.unwrap().unwrap(), *expected);
-				}
-
-				sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-			}.boxed()
-		})
-	}
-
-	#[test]
-	fn request_finalized_hash() {
-		test_harness(|client, mut sender| {
-			async move {
-				let test_cases = [
-					(1, client.hash(1).unwrap()), // not here
-					(2, client.hash(2).unwrap()),
-				];
-				for (number, expected) in &test_cases {
-					let (tx, rx) = oneshot::channel();
-
-					sender.send(FromOverseer::Communication {
-						msg: ChainApiMessage::FinalizedBlockHash(*number, tx),
-					}).await;
-
-					assert_eq!(rx.await.unwrap().unwrap(), *expected);
-				}
-
-				sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-			}.boxed()
-		})
-	}
-
-	#[test]
-	fn request_last_finalized_number() {
-		test_harness(|client, mut sender| {
-			async move {
-				let (tx, rx) = oneshot::channel();
-
-				let expected = client.info().finalized_number;
-				sender.send(FromOverseer::Communication {
-					msg: ChainApiMessage::FinalizedBlockNumber(tx),
-				}).await;
-
-				assert_eq!(rx.await.unwrap().unwrap(), expected);
-
-				sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-			}.boxed()
-		})
-	}
-
-	#[test]
-	fn request_ancestors() {
-		test_harness(|_client, mut sender| {
-			async move {
-				let (tx, rx) = oneshot::channel();
-				sender.send(FromOverseer::Communication {
-					msg: ChainApiMessage::Ancestors { hash: THREE, k: 4, response_channel: tx },
-				}).await;
-				assert_eq!(rx.await.unwrap().unwrap(), vec![TWO, ONE]);
-
-				let (tx, rx) = oneshot::channel();
-				sender.send(FromOverseer::Communication {
-					msg: ChainApiMessage::Ancestors { hash: TWO, k: 1, response_channel: tx },
-				}).await;
-				assert_eq!(rx.await.unwrap().unwrap(), vec![ONE]);
-
-				let (tx, rx) = oneshot::channel();
-				sender.send(FromOverseer::Communication {
-					msg: ChainApiMessage::Ancestors { hash: ERROR_PATH, k: 2, response_channel: tx },
-				}).await;
-				assert!(rx.await.unwrap().is_err());
-
-				sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-			}.boxed()
-		})
-	}
-}
diff --git a/polkadot/node/core/chain-api/src/tests.rs b/polkadot/node/core/chain-api/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..730a6a2ae69d420b6308409da49696b6c96ab309
--- /dev/null
+++ b/polkadot/node/core/chain-api/src/tests.rs
@@ -0,0 +1,304 @@
+use super::*;
+
+use std::collections::BTreeMap;
+use futures::{future::BoxFuture, channel::oneshot};
+use parity_scale_codec::Encode;
+
+use polkadot_primitives::v1::{Hash, BlockNumber, BlockId, Header};
+use polkadot_node_primitives::BlockWeight;
+use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle};
+use sp_blockchain::Info as BlockInfo;
+use sp_core::testing::TaskExecutor;
+
+#[derive(Clone)]
+struct TestClient {
+	blocks: BTreeMap<Hash, BlockNumber>,
+	block_weights: BTreeMap<Hash, BlockWeight>,
+	finalized_blocks: BTreeMap<BlockNumber, Hash>,
+	headers: BTreeMap<Hash, Header>,
+}
+
+const ONE: Hash = Hash::repeat_byte(0x01);
+const TWO: Hash = Hash::repeat_byte(0x02);
+const THREE: Hash = Hash::repeat_byte(0x03);
+const FOUR: Hash = Hash::repeat_byte(0x04);
+const ERROR_PATH: Hash = Hash::repeat_byte(0xFF);
+
+fn default_header() -> Header {
+	Header {
+		parent_hash: Hash::zero(),
+		number: 100500,
+		state_root: Hash::zero(),
+		extrinsics_root: Hash::zero(),
+		digest: Default::default(),
+	}
+}
+
+impl Default for TestClient {
+	fn default() -> Self {
+		Self {
+			blocks: maplit::btreemap! {
+				ONE => 1,
+				TWO => 2,
+				THREE => 3,
+				FOUR => 4,
+			},
+			block_weights: maplit::btreemap! {
+				ONE => 0,
+				TWO => 1,
+				THREE => 1,
+				FOUR => 2,
+			},
+			finalized_blocks: maplit::btreemap! {
+				1 => ONE,
+				3 => THREE,
+			},
+			headers: maplit::btreemap! {
+				TWO => Header {
+					parent_hash: ONE,
+					number: 2,
+					..default_header()
+				},
+				THREE => Header {
+					parent_hash: TWO,
+					number: 3,
+					..default_header()
+				},
+				FOUR => Header {
+					parent_hash: THREE,
+					number: 4,
+					..default_header()
+				},
+				ERROR_PATH => Header {
+					..default_header()
+				}
+			},
+		}
+	}
+}
+
+fn last_key_value<K: Clone, V: Clone>(map: &BTreeMap<K, V>) -> (K, V) {
+	assert!(!map.is_empty());
+	map.iter()
+		.last()
+		.map(|(k, v)| (k.clone(), v.clone()))
+		.unwrap()
+}
+
+impl HeaderBackend<Block> for TestClient {
+	fn info(&self) -> BlockInfo<Block> {
+		let genesis_hash = self.blocks.iter().next().map(|(h, _)| *h).unwrap();
+		let (best_hash, best_number) = last_key_value(&self.blocks);
+		let (finalized_number, finalized_hash) = last_key_value(&self.finalized_blocks);
+
+		BlockInfo {
+			best_hash,
+			best_number,
+			genesis_hash,
+			finalized_hash,
+			finalized_number,
+			number_leaves: 0,
+			finalized_state: None,
+		}
+	}
+	fn number(&self, hash: Hash) -> sp_blockchain::Result<Option<BlockNumber>> {
+		Ok(self.blocks.get(&hash).copied())
+	}
+	fn hash(&self, number: BlockNumber) -> sp_blockchain::Result<Option<Hash>> {
+		Ok(self.finalized_blocks.get(&number).copied())
+	}
+	fn header(&self, id: BlockId) -> sp_blockchain::Result<Option<Header>> {
+		match id {
+			// for error path testing
+			BlockId::Hash(hash) if hash.is_zero()  => {
+				Err(sp_blockchain::Error::Backend("Zero hashes are illegal!".into()))
+			}
+			BlockId::Hash(hash) => {
+				Ok(self.headers.get(&hash).cloned())
+			}
+			_ => unreachable!(),
+		}
+	}
+	fn status(&self, _id: BlockId) -> sp_blockchain::Result<sp_blockchain::BlockStatus> {
+		unimplemented!()
+	}
+}
+
+fn test_harness(
+	test: impl FnOnce(Arc<TestClient>, TestSubsystemContextHandle<ChainApiMessage>)
+		-> BoxFuture<'static, ()>,
+) {
+	let (ctx, ctx_handle) = make_subsystem_context(TaskExecutor::new());
+	let client = Arc::new(TestClient::default());
+
+	let subsystem = ChainApiSubsystem::new(client.clone(), Metrics(None));
+	let chain_api_task = run(ctx, subsystem).map(|x| x.unwrap());
+	let test_task = test(client, ctx_handle);
+
+	futures::executor::block_on(future::join(chain_api_task, test_task));
+}
+
+impl AuxStore for TestClient {
+	fn insert_aux<
+		'a,
+		'b: 'a,
+		'c: 'a,
+		I: IntoIterator<Item = &'a (&'c [u8], &'c [u8])>,
+		D: IntoIterator<Item = &'a &'b [u8]>,
+	>(
+		&self,
+		_insert: I,
+		_delete: D,
+	) -> sp_blockchain::Result<()> {
+		unimplemented!()
+	}
+
+	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
+		Ok(self
+			.block_weights
+			.iter()
+			.find(|(hash, _)| sc_consensus_babe::aux_schema::block_weight_key(hash) == key)
+			.map(|(_, weight)| weight.encode()))
+	}
+}
+
+#[test]
+fn request_block_number() {
+	test_harness(|client, mut sender| {
+		async move {
+			let zero = Hash::zero();
+			let test_cases = [
+				(TWO, client.number(TWO).unwrap()),
+				(zero, client.number(zero).unwrap()), // not here
+			];
+			for (hash, expected) in &test_cases {
+				let (tx, rx) = oneshot::channel();
+
+				sender.send(FromOverseer::Communication {
+					msg: ChainApiMessage::BlockNumber(*hash, tx),
+				}).await;
+
+				assert_eq!(rx.await.unwrap().unwrap(), *expected);
+			}
+
+			sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
+		}.boxed()
+	})
+}
+
+#[test]
+fn request_block_header() {
+	test_harness(|client, mut sender| {
+		async move {
+			const NOT_HERE: Hash = Hash::repeat_byte(0x5);
+			let test_cases = [
+				(TWO, client.header(BlockId::Hash(TWO)).unwrap()),
+				(NOT_HERE, client.header(BlockId::Hash(NOT_HERE)).unwrap()),
+			];
+			for (hash, expected) in &test_cases {
+				let (tx, rx) = oneshot::channel();
+
+				sender.send(FromOverseer::Communication {
+					msg: ChainApiMessage::BlockHeader(*hash, tx),
+				}).await;
+
+				assert_eq!(rx.await.unwrap().unwrap(), *expected);
+			}
+
+			sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
+		}.boxed()
+	})
+}
+
+#[test]
+fn request_block_weight() {
+	test_harness(|client, mut sender| {
+		async move {
+			const NOT_HERE: Hash = Hash::repeat_byte(0x5);
+			let test_cases = [
+				(TWO, sc_consensus_babe::block_weight(&*client, TWO).unwrap()),
+				(FOUR, sc_consensus_babe::block_weight(&*client, FOUR).unwrap()),
+				(NOT_HERE, sc_consensus_babe::block_weight(&*client, NOT_HERE).unwrap()),
+			];
+			for (hash, expected) in &test_cases {
+				let (tx, rx) = oneshot::channel();
+
+				sender.send(FromOverseer::Communication {
+					msg: ChainApiMessage::BlockWeight(*hash, tx),
+				}).await;
+
+				assert_eq!(rx.await.unwrap().unwrap(), *expected);
+			}
+
+			sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
+		}.boxed()
+	})
+}
+
+#[test]
+fn request_finalized_hash() {
+	test_harness(|client, mut sender| {
+		async move {
+			let test_cases = [
+				(1, client.hash(1).unwrap()), // not here
+				(2, client.hash(2).unwrap()),
+			];
+			for (number, expected) in &test_cases {
+				let (tx, rx) = oneshot::channel();
+
+				sender.send(FromOverseer::Communication {
+					msg: ChainApiMessage::FinalizedBlockHash(*number, tx),
+				}).await;
+
+				assert_eq!(rx.await.unwrap().unwrap(), *expected);
+			}
+
+			sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
+		}.boxed()
+	})
+}
+
+#[test]
+fn request_last_finalized_number() {
+	test_harness(|client, mut sender| {
+		async move {
+			let (tx, rx) = oneshot::channel();
+
+			let expected = client.info().finalized_number;
+			sender.send(FromOverseer::Communication {
+				msg: ChainApiMessage::FinalizedBlockNumber(tx),
+			}).await;
+
+			assert_eq!(rx.await.unwrap().unwrap(), expected);
+
+			sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
+		}.boxed()
+	})
+}
+
+#[test]
+fn request_ancestors() {
+	test_harness(|_client, mut sender| {
+		async move {
+			let (tx, rx) = oneshot::channel();
+			sender.send(FromOverseer::Communication {
+				msg: ChainApiMessage::Ancestors { hash: THREE, k: 4, response_channel: tx },
+			}).await;
+			assert_eq!(rx.await.unwrap().unwrap(), vec![TWO, ONE]);
+
+			let (tx, rx) = oneshot::channel();
+			sender.send(FromOverseer::Communication {
+				msg: ChainApiMessage::Ancestors { hash: TWO, k: 1, response_channel: tx },
+			}).await;
+			assert_eq!(rx.await.unwrap().unwrap(), vec![ONE]);
+
+			let (tx, rx) = oneshot::channel();
+			sender.send(FromOverseer::Communication {
+				msg: ChainApiMessage::Ancestors { hash: ERROR_PATH, k: 2, response_channel: tx },
+			}).await;
+			assert!(rx.await.unwrap().is_err());
+
+			sender.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
+		}.boxed()
+	})
+}
diff --git a/polkadot/node/core/chain-selection/Cargo.toml b/polkadot/node/core/chain-selection/Cargo.toml
index f6b42a2bec4774a34ea9637d79446ae454abaad1..84112ba345ed3f74a78c9f1ed3cbd1c87554a1fb 100644
--- a/polkadot/node/core/chain-selection/Cargo.toml
+++ b/polkadot/node/core/chain-selection/Cargo.toml
@@ -11,7 +11,7 @@ futures-timer = "3"
 tracing = "0.1.26"
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-primitives = { path = "../../primitives" }
-polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+polkadot-node-subsystem = { path = "../../subsystem" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util" }
 kvdb = "0.10.0"
 thiserror = "1.0.23"
diff --git a/polkadot/node/core/chain-selection/src/lib.rs b/polkadot/node/core/chain-selection/src/lib.rs
index 39d416eb22b214b4306b0b5d49d998578be61d8a..a52119c76ef5124c8fb951c9881780912cf2b9a7 100644
--- a/polkadot/node/core/chain-selection/src/lib.rs
+++ b/polkadot/node/core/chain-selection/src/lib.rs
@@ -18,8 +18,8 @@
 
 use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog};
 use polkadot_node_primitives::BlockWeight;
-use polkadot_subsystem::{
-	Subsystem, SubsystemContext, SubsystemError, SpawnedSubsystem,
+use polkadot_node_subsystem::{
+	overseer, SubsystemContext, SubsystemError, SpawnedSubsystem,
 	OverseerSignal, FromOverseer,
 	messages::{ChainSelectionMessage, ChainApiMessage},
 	errors::ChainApiError,
@@ -306,8 +306,10 @@ impl ChainSelectionSubsystem {
 	}
 }
 
-impl<Context> Subsystem<Context> for ChainSelectionSubsystem
-	where Context: SubsystemContext<Message = ChainSelectionMessage>
+impl<Context> overseer::Subsystem<Context, SubsystemError> for ChainSelectionSubsystem
+where
+	Context: SubsystemContext<Message = ChainSelectionMessage>,
+	Context: overseer::SubsystemContext<Message = ChainSelectionMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let backend = crate::db_backend::v1::DbBackend::new(
@@ -337,6 +339,7 @@ async fn run<Context, B>(
 )
 	where
 		Context: SubsystemContext<Message = ChainSelectionMessage>,
+		Context: overseer::SubsystemContext<Message = ChainSelectionMessage>,
 		B: Backend,
 {
 	loop {
@@ -376,6 +379,7 @@ async fn run_iteration<Context, B>(
 	-> Result<(), Error>
 	where
 		Context: SubsystemContext<Message = ChainSelectionMessage>,
+		Context: overseer::SubsystemContext<Message = ChainSelectionMessage>,
 		B: Backend,
 {
 	let mut stagnant_check_stream = stagnant_check_interval.timeout_stream();
@@ -439,11 +443,11 @@ async fn fetch_finalized(
 	let (number_tx, number_rx) = oneshot::channel();
 	let (hash_tx, hash_rx) = oneshot::channel();
 
-	ctx.send_message(ChainApiMessage::FinalizedBlockNumber(number_tx).into()).await;
+	ctx.send_message(ChainApiMessage::FinalizedBlockNumber(number_tx)).await;
 
 	let number = number_rx.await??;
 
-	ctx.send_message(ChainApiMessage::FinalizedBlockHash(number, hash_tx).into()).await;
+	ctx.send_message(ChainApiMessage::FinalizedBlockHash(number, hash_tx)).await;
 
 	match hash_rx.await?? {
 		None => {
@@ -464,7 +468,7 @@ async fn fetch_header(
 	hash: Hash,
 ) -> Result<Option<Header>, Error> {
 	let (h_tx, h_rx) = oneshot::channel();
-	ctx.send_message(ChainApiMessage::BlockHeader(hash, h_tx).into()).await;
+	ctx.send_message(ChainApiMessage::BlockHeader(hash, h_tx)).await;
 
 	h_rx.await?.map_err(Into::into)
 }
@@ -474,7 +478,7 @@ async fn fetch_block_weight(
 	hash: Hash,
 ) -> Result<Option<BlockWeight>, Error> {
 	let (tx, rx) = oneshot::channel();
-	ctx.send_message(ChainApiMessage::BlockWeight(hash, tx).into()).await;
+	ctx.send_message(ChainApiMessage::BlockWeight(hash, tx)).await;
 
 	rx.await?.map_err(Into::into)
 }
diff --git a/polkadot/node/core/chain-selection/src/tests.rs b/polkadot/node/core/chain-selection/src/tests.rs
index 1449fae3f5dd1245de7731ca35e4394d049fc357..0256412154fc2dd907b31518a12d269cb3809b92 100644
--- a/polkadot/node/core/chain-selection/src/tests.rs
+++ b/polkadot/node/core/chain-selection/src/tests.rs
@@ -31,8 +31,11 @@ use sp_core::testing::TaskExecutor;
 use assert_matches::assert_matches;
 
 use polkadot_primitives::v1::{BlakeTwo256, HashT, ConsensusLog};
-use polkadot_subsystem::{jaeger, ActiveLeavesUpdate, ActivatedLeaf, LeafStatus};
-use polkadot_subsystem::messages::AllMessages;
+use polkadot_node_subsystem::{
+	messages::AllMessages,
+	jaeger,
+	ActiveLeavesUpdate, ActivatedLeaf, LeafStatus,
+};
 use polkadot_node_subsystem_test_helpers as test_helpers;
 
 #[derive(Default)]
diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs
index 9d4daeeba7e4dab28f9073c00ac2468cbc3e70f1..0bd94103ca8bc9662ba1b7e4562d93b801be0498 100644
--- a/polkadot/node/core/dispute-coordinator/src/lib.rs
+++ b/polkadot/node/core/dispute-coordinator/src/lib.rs
@@ -30,10 +30,11 @@ use std::sync::Arc;
 
 use polkadot_node_primitives::{CandidateVotes, SignedDisputeStatement};
 use polkadot_node_subsystem::{
+	overseer,
 	messages::{
 		DisputeCoordinatorMessage, ChainApiMessage, DisputeParticipationMessage,
 	},
-	Subsystem, SubsystemContext, FromOverseer, OverseerSignal, SpawnedSubsystem,
+	SubsystemContext, FromOverseer, OverseerSignal, SpawnedSubsystem,
 	SubsystemError,
 	errors::{ChainApiError, RuntimeApiError},
 };
@@ -101,8 +102,10 @@ impl DisputeCoordinatorSubsystem {
 	}
 }
 
-impl<Context> Subsystem<Context> for DisputeCoordinatorSubsystem
-	where Context: SubsystemContext<Message = DisputeCoordinatorMessage>
+impl<Context> overseer::Subsystem<Context, SubsystemError> for DisputeCoordinatorSubsystem
+where
+	Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
+	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = run(self, ctx)
@@ -160,7 +163,9 @@ impl Error {
 }
 
 async fn run<Context>(subsystem: DisputeCoordinatorSubsystem, mut ctx: Context)
-	where Context: SubsystemContext<Message = DisputeCoordinatorMessage>
+where
+	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+	Context: SubsystemContext<Message = DisputeCoordinatorMessage>
 {
 	loop {
 		let res = run_iteration(&mut ctx, &subsystem).await;
@@ -187,7 +192,9 @@ async fn run<Context>(subsystem: DisputeCoordinatorSubsystem, mut ctx: Context)
 // lead to another call to this function.
 async fn run_iteration<Context>(ctx: &mut Context, subsystem: &DisputeCoordinatorSubsystem)
 	-> Result<(), Error>
-	where Context: SubsystemContext<Message = DisputeCoordinatorMessage>
+where
+	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+	Context: SubsystemContext<Message = DisputeCoordinatorMessage>
 {
 	let DisputeCoordinatorSubsystem { ref store, ref keystore, ref config } = *subsystem;
 	let mut state = State {
@@ -225,7 +232,7 @@ async fn run_iteration<Context>(ctx: &mut Context, subsystem: &DisputeCoordinato
 }
 
 async fn handle_new_activations(
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage> + overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
 	store: &dyn KeyValueDB,
 	state: &mut State,
 	config: &Config,
@@ -236,7 +243,7 @@ async fn handle_new_activations(
 			let (tx, rx) = oneshot::channel();
 
 			ctx.send_message(
-				ChainApiMessage::BlockHeader(new_leaf, tx).into()
+				ChainApiMessage::BlockHeader(new_leaf, tx)
 			).await;
 
 			match rx.await?? {
@@ -488,7 +495,7 @@ async fn handle_import_statements(
 			candidate_receipt,
 			session,
 			n_validators: n_validators as u32,
-		}.into()).await;
+		}).await;
 	}
 
 	if concluded_valid && already_disputed {
diff --git a/polkadot/node/core/dispute-participation/src/lib.rs b/polkadot/node/core/dispute-participation/src/lib.rs
index 19bc56a020a6046a0fe5dd83c070f383c5d5b9bb..19827cab41204f412892bd59222b4163de4b192f 100644
--- a/polkadot/node/core/dispute-participation/src/lib.rs
+++ b/polkadot/node/core/dispute-participation/src/lib.rs
@@ -26,12 +26,13 @@ use futures::prelude::*;
 use polkadot_node_primitives::ValidationResult;
 use polkadot_node_subsystem::{
 	errors::{RecoveryError, RuntimeApiError},
+	overseer,
 	messages::{
-		AllMessages, AvailabilityRecoveryMessage, AvailabilityStoreMessage,
+		AvailabilityRecoveryMessage, AvailabilityStoreMessage,
 		CandidateValidationMessage, DisputeCoordinatorMessage, DisputeParticipationMessage,
 		RuntimeApiMessage, RuntimeApiRequest,
 	},
-	ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem,
+	ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem,
 	SubsystemContext, SubsystemError,
 };
 use polkadot_primitives::v1::{BlockNumber, CandidateHash, CandidateReceipt, Hash, SessionIndex};
@@ -55,9 +56,10 @@ impl DisputeParticipationSubsystem {
 	}
 }
 
-impl<Context> Subsystem<Context> for DisputeParticipationSubsystem
+impl<Context> overseer::Subsystem<Context, SubsystemError> for DisputeParticipationSubsystem
 where
 	Context: SubsystemContext<Message = DisputeParticipationMessage>,
+	Context: overseer::SubsystemContext<Message = DisputeParticipationMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = run(ctx).map(|_| Ok(())).boxed();
@@ -111,6 +113,7 @@ impl Error {
 async fn run<Context>(mut ctx: Context)
 where
 	Context: SubsystemContext<Message = DisputeParticipationMessage>,
+	Context: overseer::SubsystemContext<Message = DisputeParticipationMessage>,
 {
 	let mut state = State { recent_block: None };
 
@@ -196,7 +199,6 @@ async fn participate(
 			None,
 			recover_available_data_tx,
 		)
-		.into(),
 	)
 	.await;
 
@@ -223,7 +225,6 @@ async fn participate(
 				code_tx,
 			),
 		)
-		.into(),
 	)
 	.await;
 
@@ -252,7 +253,6 @@ async fn participate(
 			available_data.clone(),
 			store_available_data_tx,
 		)
-		.into(),
 	)
 	.await;
 
@@ -277,7 +277,6 @@ async fn participate(
 			available_data.pov,
 			validation_tx,
 		)
-		.into(),
 	)
 	.await;
 
@@ -360,13 +359,13 @@ async fn issue_local_statement(
 	session: SessionIndex,
 	valid: bool,
 ) {
-	ctx.send_message(AllMessages::DisputeCoordinator(
+	ctx.send_message(
 		DisputeCoordinatorMessage::IssueLocalStatement(
 			session,
 			candidate_hash,
 			candidate_receipt,
 			valid,
 		),
-	))
+	)
 	.await
 }
diff --git a/polkadot/node/core/dispute-participation/src/tests.rs b/polkadot/node/core/dispute-participation/src/tests.rs
index 2b086c43d179f6040a8db3700ba9cea7e18b89f8..a56e204b0551807e984f37f5304acaaee34431c0 100644
--- a/polkadot/node/core/dispute-participation/src/tests.rs
+++ b/polkadot/node/core/dispute-participation/src/tests.rs
@@ -24,7 +24,8 @@ use super::*;
 use parity_scale_codec::Encode;
 use polkadot_node_primitives::{AvailableData, BlockData, InvalidCandidate, PoV};
 use polkadot_node_subsystem::{
-	jaeger, messages::ValidationFailed, ActivatedLeaf, ActiveLeavesUpdate, LeafStatus,
+	overseer::Subsystem,
+	jaeger, messages::{AllMessages, ValidationFailed}, ActivatedLeaf, ActiveLeavesUpdate, LeafStatus,
 };
 use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle};
 use polkadot_primitives::v1::{BlakeTwo256, CandidateCommitments, HashT, Header, ValidationCode};
diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml
index 6dd3d3bbdab6bf1369d02248fd7fa84d45b2dc7e..bfd1e84c415d2604987d5735f5340b77c5a0407c 100644
--- a/polkadot/node/core/parachains-inherent/Cargo.toml
+++ b/polkadot/node/core/parachains-inherent/Cargo.toml
@@ -11,7 +11,6 @@ tracing = "0.1.26"
 thiserror = "1.0.23"
 async-trait = "0.1.47"
 polkadot-node-subsystem = { path = "../../subsystem" }
-polkadot-overseer = { path = "../../overseer" }
 polkadot-primitives = { path = "../../../primitives" }
 sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/node/core/parachains-inherent/src/lib.rs b/polkadot/node/core/parachains-inherent/src/lib.rs
index 23bd250a2f3c4029bfa76dcd297a28406e070ab9..0ba09a86ace94fe2bc912cb725586802f91df663 100644
--- a/polkadot/node/core/parachains-inherent/src/lib.rs
+++ b/polkadot/node/core/parachains-inherent/src/lib.rs
@@ -26,9 +26,9 @@
 
 use futures::{select, FutureExt};
 use polkadot_node_subsystem::{
-	messages::{AllMessages, ProvisionerMessage}, SubsystemError,
+	overseer::Handle,
+	messages::ProvisionerMessage, errors::SubsystemError,
 };
-use polkadot_overseer::OverseerHandler;
 use polkadot_primitives::v1::{
 	Block, Hash, InherentData as ParachainsInherentData,
 };
@@ -48,19 +48,17 @@ impl ParachainsInherentDataProvider {
 	/// Create a new instance of the [`ParachainsInherentDataProvider`].
 	pub async fn create<C: HeaderBackend<Block>>(
 		client: &C,
-		mut overseer: OverseerHandler,
+		mut overseer: Handle,
 		parent: Hash,
 	) -> Result<Self, Error> {
 		let pid = async {
 			let (sender, receiver) = futures::channel::oneshot::channel();
 			overseer.wait_for_activation(parent, sender).await;
-			receiver.await.map_err(|_| Error::ClosedChannelAwaitingActivation)?.map_err(Error::Subsystem)?;
+			receiver.await.map_err(|_| Error::ClosedChannelAwaitingActivation)?.map_err(|e| Error::Subsystem(e))?;
 
 			let (sender, receiver) = futures::channel::oneshot::channel();
 			overseer.send_msg(
-				AllMessages::Provisioner(
-					ProvisionerMessage::RequestInherentData(parent, sender),
-				),
+				ProvisionerMessage::RequestInherentData(parent, sender),
 				std::any::type_name::<Self>(),
 			).await;
 
@@ -127,7 +125,7 @@ impl sp_inherents::InherentDataProvider for ParachainsInherentDataProvider {
 #[derive(thiserror::Error, Debug)]
 pub enum Error {
 	#[error("Blockchain error")]
-	Blockchain(sp_blockchain::Error),
+	Blockchain(#[from] sp_blockchain::Error),
 	#[error("Timeout: provisioner did not return inherent data after {:?}", PROVISIONER_TIMEOUT)]
 	Timeout,
 	#[error("Could not find the parent header in the blockchain: {:?}", _0)]
@@ -137,5 +135,5 @@ pub enum Error {
 	#[error("Closed channel from provisioner when awaiting inherent data")]
 	ClosedChannelAwaitingInherentData,
 	#[error("Subsystem failed")]
-	Subsystem(SubsystemError),
+	Subsystem(#[from] SubsystemError),
 }
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index 980a67858a560b59e84c5541fd881b6ca79b57bd..88c85a03726d6e9a88e45eed7fd11f1616441897 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -23,12 +23,14 @@
 #![warn(missing_docs)]
 
 use polkadot_subsystem::{
-	Subsystem, SpawnedSubsystem, SubsystemResult, SubsystemContext,
-	FromOverseer, OverseerSignal,
+	SubsystemError, SubsystemResult,
+	FromOverseer, OverseerSignal, SpawnedSubsystem,
+	SubsystemContext,
+	errors::RuntimeApiError,
 	messages::{
 		RuntimeApiMessage, RuntimeApiRequest as Request,
 	},
-	errors::RuntimeApiError,
+	overseer,
 };
 use polkadot_node_subsystem_util::metrics::{self, prometheus};
 use polkadot_primitives::v1::{Block, BlockId, Hash, ParachainHost};
@@ -85,10 +87,11 @@ impl<Client> RuntimeApiSubsystem<Client> {
 	}
 }
 
-impl<Client, Context> Subsystem<Context> for RuntimeApiSubsystem<Client> where
+impl<Client, Context> overseer::Subsystem<Context, SubsystemError> for RuntimeApiSubsystem<Client> where
 	Client: ProvideRuntimeApi<Block> + Send + 'static + Sync,
 	Client::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
-	Context: SubsystemContext<Message = RuntimeApiMessage>
+	Context: SubsystemContext<Message = RuntimeApiMessage>,
+	Context: overseer::SubsystemContext<Message = RuntimeApiMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		SpawnedSubsystem {
@@ -265,12 +268,14 @@ impl<Client> RuntimeApiSubsystem<Client> where
 	}
 }
 
-async fn run<Client>(
-	mut ctx: impl SubsystemContext<Message = RuntimeApiMessage>,
+async fn run<Client, Context>(
+	mut ctx: Context,
 	mut subsystem: RuntimeApiSubsystem<Client>,
 ) -> SubsystemResult<()> where
 	Client: ProvideRuntimeApi<Block> + Send + Sync + 'static,
 	Client::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+	Context: SubsystemContext<Message = RuntimeApiMessage>,
+	Context: overseer::SubsystemContext<Message = RuntimeApiMessage>,
 {
 	loop {
 		select! {
diff --git a/polkadot/node/jaeger/src/spans.rs b/polkadot/node/jaeger/src/spans.rs
index acbb8541f2916f0de6564fd72e9971f7e5c1e97d..4f3114d39b596004084992ef2947a6ed8062d5ca 100644
--- a/polkadot/node/jaeger/src/spans.rs
+++ b/polkadot/node/jaeger/src/spans.rs
@@ -143,7 +143,6 @@ impl std::ops::Deref for PerLeafSpan {
 #[repr(u8)]
 #[non_exhaustive]
 pub enum Stage {
-	CandidateSelection = 1,
 	CandidateBacking = 2,
 	StatementDistribution = 3,
 	PoVDistribution = 4,
diff --git a/polkadot/node/malus/README.md b/polkadot/node/malus/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a52e19bfefc5cab9522c09ae6cb6421e52084358
--- /dev/null
+++ b/polkadot/node/malus/README.md
@@ -0,0 +1,3 @@
+# malus
+
+Create nemesis nodes with alternate behavior traits: at best faulty, at worst intentionally destructive.
\ No newline at end of file
diff --git a/polkadot/node/malus/src/lib.rs b/polkadot/node/malus/src/lib.rs
index 1b8945a6c465841557765a684dd826c89a726a31..516d4840b3c48ea3de338e1f74d55200e85ad298 100644
--- a/polkadot/node/malus/src/lib.rs
+++ b/polkadot/node/malus/src/lib.rs
@@ -21,7 +21,7 @@
 //! messages on the overseer level.
 
 use polkadot_node_subsystem::*;
-pub use polkadot_node_subsystem::{messages::AllMessages, FromOverseer};
+pub use polkadot_node_subsystem::{overseer, messages::AllMessages, FromOverseer};
 use std::future::Future;
 use std::pin::Pin;
 
@@ -50,9 +50,9 @@ pub struct FilteredSender<Sender, Fil> {
 }
 
 #[async_trait::async_trait]
-impl<Sender, Fil> SubsystemSender for FilteredSender<Sender, Fil>
+impl<Sender, Fil> overseer::SubsystemSender<AllMessages> for FilteredSender<Sender, Fil>
 where
-	Sender: SubsystemSender,
+	Sender: overseer::SubsystemSender<AllMessages>,
 	Fil: MsgFilter,
 {
 	async fn send_message(&mut self, msg: AllMessages) {
@@ -79,19 +79,19 @@ where
 }
 
 /// A subsystem context, that filters the outgoing messages.
-pub struct FilteredContext<Context: SubsystemContext, Fil: MsgFilter> {
+pub struct FilteredContext<Context: overseer::SubsystemContext + SubsystemContext, Fil: MsgFilter> {
 	inner: Context,
 	message_filter: Fil,
-	sender: FilteredSender<<Context as SubsystemContext>::Sender, Fil>,
+	sender: FilteredSender<<Context as overseer::SubsystemContext>::Sender, Fil>,
 }
 
 impl<Context, Fil> FilteredContext<Context, Fil>
 where
-	Context: SubsystemContext,
-	Fil: MsgFilter<Message = <Context as SubsystemContext>::Message>,
+	Context: overseer::SubsystemContext + SubsystemContext,
+	Fil: MsgFilter<Message = <Context as overseer::SubsystemContext>::Message>,
 {
 	pub fn new(mut inner: Context, message_filter: Fil) -> Self {
-		let sender = FilteredSender::<<Context as SubsystemContext>::Sender, Fil> {
+		let sender = FilteredSender::<<Context as overseer::SubsystemContext>::Sender, Fil> {
 			inner: inner.sender().clone(),
 			message_filter: message_filter.clone(),
 		};
@@ -104,13 +104,17 @@ where
 }
 
 #[async_trait::async_trait]
-impl<Context, Fil> SubsystemContext for FilteredContext<Context, Fil>
+impl<Context, Fil> overseer::SubsystemContext for FilteredContext<Context, Fil>
 where
-	Context: SubsystemContext,
-	Fil: MsgFilter<Message = <Context as SubsystemContext>::Message>,
+	Context: overseer::SubsystemContext + SubsystemContext,
+	Fil: MsgFilter<Message = <Context as overseer::SubsystemContext>::Message>,
+	<Context as overseer::SubsystemContext>::AllMessages: From<<Context as overseer::SubsystemContext>::Message>,
 {
-	type Message = <Context as SubsystemContext>::Message;
-	type Sender = FilteredSender<<Context as SubsystemContext>::Sender, Fil>;
+	type Message = <Context as overseer::SubsystemContext>::Message;
+	type Sender = FilteredSender<<Context as overseer::SubsystemContext>::Sender, Fil>;
+	type Error = <Context as overseer::SubsystemContext>::Error;
+	type AllMessages = <Context as overseer::SubsystemContext>::AllMessages;
+	type Signal = <Context as overseer::SubsystemContext>::Signal;
 
 	async fn try_recv(&mut self) -> Result<Option<FromOverseer<Self::Message>>, ()> {
 		loop {
@@ -170,15 +174,15 @@ impl<Sub, Fil> FilteredSubsystem<Sub, Fil> {
 	}
 }
 
-impl<Context, Sub, Fil> Subsystem<Context> for FilteredSubsystem<Sub, Fil>
+impl<Context, Sub, Fil> overseer::Subsystem<Context, SubsystemError> for FilteredSubsystem<Sub, Fil>
 where
-	Context: SubsystemContext + Sync + Send,
-	Sub: Subsystem<FilteredContext<Context, Fil>>,
-	FilteredContext<Context, Fil>: SubsystemContext,
-	Fil: MsgFilter<Message = <Context as SubsystemContext>::Message>,
+	Context: overseer::SubsystemContext + SubsystemContext + Sync + Send,
+	Sub: overseer::Subsystem<FilteredContext<Context, Fil>, SubsystemError>,
+	FilteredContext<Context, Fil>: overseer::SubsystemContext + SubsystemContext,
+	Fil: MsgFilter<Message = <Context as overseer::SubsystemContext>::Message>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let ctx = FilteredContext::new(ctx, self.message_filter);
-		Subsystem::<FilteredContext<Context, Fil>>::start(self.subsystem, ctx)
+		overseer::Subsystem::<FilteredContext<Context, Fil>, SubsystemError>::start(self.subsystem, ctx)
 	}
 }
diff --git a/polkadot/node/malus/src/variant-a.rs b/polkadot/node/malus/src/variant-a.rs
index 1e9cb7928cb2ce163c48df90178214934de2fe9b..6b89f64071ec041c54f4d6dabbc1b1c72522cbce 100644
--- a/polkadot/node/malus/src/variant-a.rs
+++ b/polkadot/node/malus/src/variant-a.rs
@@ -27,7 +27,7 @@ use polkadot_cli::{
 	create_default_subsystems,
 	service::{
 		AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, HeaderBackend, Overseer,
-		OverseerGen, OverseerGenArgs, OverseerHandler, ParachainHost, ProvideRuntimeApi,
+		OverseerGen, OverseerGenArgs, Handle, ParachainHost, ProvideRuntimeApi,
 		SpawnNamed,
 	},
 	Cli,
@@ -73,7 +73,7 @@ impl OverseerGen for BehaveMaleficient {
 	fn generate<'a, Spawner, RuntimeClient>(
 		&self,
 		args: OverseerGenArgs<'a, Spawner, RuntimeClient>,
-	) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandler), Error>
+	) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, Handle), Error>
 	where
 		RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
 		RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..6fb4ce12f6edcf009fdfc7d1463fd4d5d0d4d1fc
--- /dev/null
+++ b/polkadot/node/metrics/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "polkadot-node-metrics"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+description = "Subsystem traits and message definitions"
+
+[dependencies]
+async-trait = "0.1.42"
+futures = "0.3.15"
+futures-timer = "3.0.2"
+
+metered-channel = { path = "../metered-channel" }
+
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/node/metrics/src/lib.rs b/polkadot/node/metrics/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7a149a553b7cdfa497dc65479e55005620217998
--- /dev/null
+++ b/polkadot/node/metrics/src/lib.rs
@@ -0,0 +1,121 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Metrics helpers for Polkadot node subsystems.
+//!
+//! This crate collects the metrics-related pieces that subsystems share: the
+//! [`metrics::Metrics`] trait together with re-exported Prometheus types, a
+//! re-export of `metered-channel` as [`metered`], and the [`Metronome`] stream
+//! for driving periodic metric updates.
+
+#![warn(missing_docs)]
+
+use futures::prelude::*;
+use futures_timer::Delay;
+use std::{
+	pin::Pin,
+	task::{Poll, Context},
+	time::Duration,
+};
+
+pub use metered_channel as metered;
+
+/// This module reexports Prometheus types and defines the [`Metrics`] trait.
+pub mod metrics {
+	/// Reexport Substrate Prometheus types.
+	pub use substrate_prometheus_endpoint as prometheus;
+
+
+	/// Subsystem- or job-specific Prometheus metrics.
+	///
+	/// Usually implemented as a wrapper around `Option<ActualMetrics>`
+	/// to satisfy the `Default` bound, or as the dummy type `()`.
+	/// Prometheus metrics internally hold an `Arc` reference, so cloning them is fine.
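+	///
+	/// An illustrative sketch of a typical implementation for a subsystem wrapper
+	/// type; the `MetricsInner` struct and the counter name are hypothetical:
+	///
+	/// ```ignore
+	/// use polkadot_node_metrics::metrics::{self, prometheus};
+	///
+	/// #[derive(Default, Clone)]
+	/// struct Metrics(Option<MetricsInner>);
+	///
+	/// #[derive(Clone)]
+	/// struct MetricsInner {
+	/// 	messages_processed: prometheus::Counter<prometheus::U64>,
+	/// }
+	///
+	/// impl metrics::Metrics for Metrics {
+	/// 	fn try_register(registry: &prometheus::Registry)
+	/// 		-> Result<Self, prometheus::PrometheusError>
+	/// 	{
+	/// 		let inner = MetricsInner {
+	/// 			messages_processed: prometheus::register(
+	/// 				prometheus::Counter::new(
+	/// 					"parachain_example_messages_processed_total",
+	/// 					"Number of messages processed by the example subsystem.",
+	/// 				)?,
+	/// 				registry,
+	/// 			)?,
+	/// 		};
+	/// 		Ok(Metrics(Some(inner)))
+	/// 	}
+	/// }
+	/// ```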
+	pub trait Metrics: Default + Clone {
+		/// Try to register metrics in the Prometheus registry.
+		fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError>;
+
+		/// Convenience method to register metrics in the optional Prometheus registry.
+		///
+		/// If no registry is provided, returns `Default::default()`. Otherwise, returns the same
+		/// thing that `try_register` does.
+		fn register(registry: Option<&prometheus::Registry>) -> Result<Self, prometheus::PrometheusError> {
+			match registry {
+				None => Ok(Self::default()),
+				Some(registry) => Self::try_register(registry),
+			}
+		}
+	}
+
+	// dummy impl
+	impl Metrics for () {
+		fn try_register(_registry: &prometheus::Registry) -> Result<(), prometheus::PrometheusError> {
+			Ok(())
+		}
+	}
+}
+
+#[derive(Copy, Clone)]
+enum MetronomeState {
+	Snooze,
+	SetAlarm,
+}
+
+/// Create a stream of ticks with a defined cycle duration.
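+///
+/// An illustrative usage sketch (the six-second cycle and the loop body are
+/// made up for the example):
+///
+/// ```ignore
+/// use futures::StreamExt;
+/// use std::time::Duration;
+///
+/// let mut ticks = Metronome::new(Duration::from_secs(6));
+/// while let Some(()) = ticks.next().await {
+/// 	// sample and publish metrics once per cycle
+/// }
+/// ```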
+pub struct Metronome {
+	delay: Delay,
+	period: Duration,
+	state: MetronomeState,
+}
+
+impl Metronome {
+	/// Create a new metronome source with a defined cycle duration.
+	pub fn new(cycle: Duration) -> Self {
+		let period = cycle.into();
+		Self {
+			period,
+			delay: Delay::new(period),
+			state: MetronomeState::Snooze,
+		}
+	}
+}
+
+impl futures::Stream for Metronome {
+	type Item = ();
+	fn poll_next(
+		mut self: Pin<&mut Self>,
+		cx: &mut Context<'_>
+	) -> Poll<Option<Self::Item>> {
+		loop {
+			match self.state {
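+				// A tick was just emitted: re-arm the delay for the next cycle before
+				// going back to waiting.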
+				MetronomeState::SetAlarm => {
+					let val = self.period.clone();
+					self.delay.reset(val);
+					self.state = MetronomeState::Snooze;
+				}
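+				// Wait for the armed delay; once it fires, schedule a re-arm for the
+				// next poll and emit a tick.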
+				MetronomeState::Snooze => {
+					if !Pin::new(&mut self.delay).poll(cx).is_ready() {
+						break
+					}
+					self.state = MetronomeState::SetAlarm;
+					return Poll::Ready(Some(()));
+				}
+			}
+		}
+		Poll::Pending
+	}
+}
diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs
index 160d45957b0e0c2e4041a0bf152e47f77aa9ee96..c0848a3281734a33bd348394cb028a76af6b75dc 100644
--- a/polkadot/node/network/approval-distribution/src/lib.rs
+++ b/polkadot/node/network/approval-distribution/src/lib.rs
@@ -29,11 +29,13 @@ use polkadot_node_primitives::{
 	approval::{AssignmentCert, BlockApprovalMeta, IndirectSignedApprovalVote, IndirectAssignmentCert},
 };
 use polkadot_node_subsystem::{
+	overseer,
 	messages::{
-		AllMessages, ApprovalDistributionMessage, ApprovalVotingMessage, NetworkBridgeMessage,
+		ApprovalDistributionMessage, ApprovalVotingMessage, NetworkBridgeMessage,
 		AssignmentCheckResult, ApprovalCheckResult, NetworkBridgeEvent,
 	},
-	ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem, SubsystemContext,
+	SubsystemError,
+	ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
 };
 use polkadot_node_subsystem_util::{
 	metrics::{self, prometheus},
@@ -187,7 +189,7 @@ enum PendingMessage {
 impl State {
 	async fn handle_network_msg(
 		&mut self,
-		ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
+		ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
 		metrics: &Metrics,
 		event: NetworkBridgeEvent<protocol_v1::ApprovalDistributionMessage>,
 	) {
@@ -257,8 +259,7 @@ impl State {
 
 	async fn handle_new_blocks(
 		&mut self,
-		ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
-		metrics: &Metrics,
+		ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
+		metrics: &Metrics,
 		metas: Vec<BlockApprovalMeta>,
 	) {
 		let mut new_hashes = HashSet::new();
@@ -360,8 +361,7 @@ impl State {
 
 	async fn process_incoming_peer_message(
 		&mut self,
-		ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
-		metrics: &Metrics,
+		ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
+		metrics: &Metrics,
 		peer_id: PeerId,
 		msg: protocol_v1::ApprovalDistributionMessage,
 	) {
@@ -448,8 +448,7 @@ impl State {
 
 	async fn handle_peer_view_change(
 		&mut self,
-		ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
-		metrics: &Metrics,
+		ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
+		metrics: &Metrics,
 		peer_id: PeerId,
 		view: View,
 	) {
@@ -512,8 +511,7 @@ impl State {
 
 	async fn import_and_circulate_assignment(
 		&mut self,
-		ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
-		metrics: &Metrics,
+		ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
+		metrics: &Metrics,
 		source: MessageSource,
 		assignment: IndirectAssignmentCert,
 		claimed_candidate_index: CandidateIndex,
@@ -592,11 +590,11 @@ impl State {
 
 			let (tx, rx) = oneshot::channel();
 
-			ctx.send_message(AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment(
+			ctx.send_message(ApprovalVotingMessage::CheckAndImportAssignment(
 				assignment.clone(),
 				claimed_candidate_index,
 				tx,
-			))).await;
+			)).await;
 
 			let timer = metrics.time_awaiting_approval_voting();
 			let result = match rx.await {
@@ -743,14 +741,13 @@ impl State {
 				protocol_v1::ValidationProtocol::ApprovalDistribution(
 					protocol_v1::ApprovalDistributionMessage::Assignments(assignments)
 				),
-			).into()).await;
+			)).await;
 		}
 	}
 
 	async fn import_and_circulate_approval(
 		&mut self,
-		ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
-		metrics: &Metrics,
+		ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
+		metrics: &Metrics,
 		source: MessageSource,
 		vote: IndirectSignedApprovalVote,
 	) {
@@ -840,10 +837,10 @@ impl State {
 
 			let (tx, rx) = oneshot::channel();
 
-			ctx.send_message(AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportApproval(
+			ctx.send_message(ApprovalVotingMessage::CheckAndImportApproval(
 				vote.clone(),
 				tx,
-			))).await;
+			)).await;
 
 			let timer = metrics.time_awaiting_approval_voting();
 			let result = match rx.await {
@@ -989,13 +986,12 @@ impl State {
 				protocol_v1::ValidationProtocol::ApprovalDistribution(
 					protocol_v1::ApprovalDistributionMessage::Approvals(approvals)
 				),
-			).into()).await;
+			)).await;
 		}
 	}
 
 	async fn unify_with_peer(
-		ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
-		gossip_peers: &HashSet<PeerId>,
+		ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
+		gossip_peers: &HashSet<PeerId>,
 		metrics: &Metrics,
 		entries: &mut HashMap<Hash, BlockEntry>,
 		peer_id: PeerId,
@@ -1060,8 +1056,7 @@ impl State {
 
 	async fn send_gossip_messages_to_peer(
 		entries: &HashMap<Hash, BlockEntry>,
-		ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
-		peer_id: PeerId,
+		ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
+		peer_id: PeerId,
 		blocks: Vec<Hash>,
 	) {
 		let mut assignments = Vec::new();
@@ -1130,7 +1125,7 @@ impl State {
 				protocol_v1::ValidationProtocol::ApprovalDistribution(
 					protocol_v1::ApprovalDistributionMessage::Assignments(assignments)
 				),
-			).into()).await;
+			)).await;
 		}
 
 		if !approvals.is_empty() {
@@ -1147,7 +1142,7 @@ impl State {
 				protocol_v1::ValidationProtocol::ApprovalDistribution(
 					protocol_v1::ApprovalDistributionMessage::Approvals(approvals)
 				),
-			).into()).await;
+			)).await;
 		}
 	}
 }
@@ -1155,7 +1150,7 @@ impl State {
 
 /// Modify the reputation of a peer based on its behavior.
 async fn modify_reputation(
-	ctx: &mut impl SubsystemContext<Message = ApprovalDistributionMessage>,
+	ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage> + overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
 	peer_id: PeerId,
 	rep: Rep,
 ) {
@@ -1166,9 +1161,9 @@ async fn modify_reputation(
 		"Reputation change for peer",
 	);
 
-	ctx.send_message(AllMessages::NetworkBridge(
+	ctx.send_message(
 		NetworkBridgeMessage::ReportPeer(peer_id, rep),
-	)).await;
+	).await;
 }
 
 impl ApprovalDistribution {
@@ -1180,6 +1175,7 @@ impl ApprovalDistribution {
 	async fn run<Context>(self, ctx: Context)
 	where
 		Context: SubsystemContext<Message = ApprovalDistributionMessage>,
+		Context: overseer::SubsystemContext<Message = ApprovalDistributionMessage>,
 	{
 		let mut state = State::default();
 		self.run_inner(ctx, &mut state).await
@@ -1189,6 +1185,7 @@ impl ApprovalDistribution {
 	async fn run_inner<Context>(self, mut ctx: Context, state: &mut State)
 	where
 		Context: SubsystemContext<Message = ApprovalDistributionMessage>,
+		Context: overseer::SubsystemContext<Message = ApprovalDistributionMessage>,
 	{
 		loop {
 			let message = match ctx.recv().await {
@@ -1261,11 +1258,12 @@ impl ApprovalDistribution {
 	}
 }
 
-impl<C> Subsystem<C> for ApprovalDistribution
+impl<Context> overseer::Subsystem<Context, SubsystemError> for ApprovalDistribution
 where
-	C: SubsystemContext<Message = ApprovalDistributionMessage> + Sync + Send,
+	Context: SubsystemContext<Message = ApprovalDistributionMessage>,
+	Context: overseer::SubsystemContext<Message = ApprovalDistributionMessage>,
 {
-	fn start(self, ctx: C) -> SpawnedSubsystem {
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = self.run(ctx)
 			.map(|_| Ok(()))
 			.boxed();
diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs
index 675761ef14b95a12bc6690d02c43993adec0e489..a0d55c3371d33f4cfcd5ac74c81e5b3a1a291d3d 100644
--- a/polkadot/node/network/approval-distribution/src/tests.rs
+++ b/polkadot/node/network/approval-distribution/src/tests.rs
@@ -17,7 +17,7 @@
 use std::time::Duration;
 use futures::{future, Future, executor};
 use assert_matches::assert_matches;
-use polkadot_node_subsystem::messages::ApprovalCheckError;
+use polkadot_node_subsystem::messages::{AllMessages, ApprovalCheckError};
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt as _;
 use polkadot_node_network_protocol::{view, ObservedRole};
diff --git a/polkadot/node/network/availability-distribution/src/lib.rs b/polkadot/node/network/availability-distribution/src/lib.rs
index 65580ea41d885797810747ea405564180589a678..67de44ccdcb8796ae9b01f5a4512116ed7e71604 100644
--- a/polkadot/node/network/availability-distribution/src/lib.rs
+++ b/polkadot/node/network/availability-distribution/src/lib.rs
@@ -20,7 +20,8 @@ use sp_keystore::SyncCryptoStorePtr;
 
 use polkadot_subsystem::{
 	messages::AvailabilityDistributionMessage, FromOverseer, OverseerSignal, SpawnedSubsystem,
-	Subsystem, SubsystemContext, SubsystemError,
+	SubsystemContext, SubsystemError,
+	overseer,
 };
 
 /// Error and [`Result`] type for this subsystem.
@@ -58,9 +59,10 @@ pub struct AvailabilityDistributionSubsystem {
 	metrics: Metrics,
 }
 
-impl<Context> Subsystem<Context> for AvailabilityDistributionSubsystem
+impl<Context> overseer::Subsystem<Context, SubsystemError> for AvailabilityDistributionSubsystem
 where
-	Context: SubsystemContext<Message = AvailabilityDistributionMessage> + Sync + Send,
+	Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityDistributionMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = self
@@ -86,7 +88,8 @@ impl AvailabilityDistributionSubsystem {
 	/// Start processing work as passed on from the Overseer.
 	async fn run<Context>(mut self, mut ctx: Context) -> std::result::Result<(), Fatal>
 	where
-		Context: SubsystemContext<Message = AvailabilityDistributionMessage> + Sync + Send,
+		Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
+		Context: overseer::SubsystemContext<Message = AvailabilityDistributionMessage>,
 	{
 		let mut requester = Requester::new(self.metrics.clone()).fuse();
 		loop {
diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
index 3d7e41e6ae6bb2bbf16483c10724033b8614feb8..650072f0ea7e3b99ed627776292dd9ab783ba463 100644
--- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
@@ -29,7 +29,7 @@ use polkadot_primitives::v1::{
 use polkadot_node_primitives::PoV;
 use polkadot_subsystem::{
 	SubsystemContext,
-	messages::{AllMessages, NetworkBridgeMessage, IfDisconnected}
+	messages::{NetworkBridgeMessage, IfDisconnected}
 };
 use polkadot_node_subsystem_util::runtime::RuntimeInfo;
 
@@ -62,7 +62,6 @@ where
 	let full_req = Requests::PoVFetching(req);
 
 	ctx.send_message(
-		AllMessages::NetworkBridge(
 			NetworkBridgeMessage::SendRequests(
 				vec![full_req],
 				// We are supposed to be connected to validators of our group via `PeerSet`,
@@ -70,7 +69,7 @@ where
 				// longer to get established, so we try to connect in any case.
 				IfDisconnected::TryConnect
 			)
-	)).await;
+	).await;
 
 	let span = jaeger::Span::new(candidate_hash, "fetch-pov")
 		.with_validator_index(from_validator)
@@ -130,7 +129,7 @@ mod tests {
 	use polkadot_primitives::v1::{CandidateHash, Hash, ValidatorIndex};
 	use polkadot_node_primitives::BlockData;
 	use polkadot_subsystem_testhelpers as test_helpers;
-	use polkadot_subsystem::messages::{AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest};
+	use polkadot_subsystem::messages::{AllMessages, AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest};
 
 	use super::*;
 	use crate::LOG_TARGET;
diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
index db8790435b2cca6afcb09cba186d8465189d34b8..240cf8c5e9a6f904c5f86b2f1c93e8c7d891f634 100644
--- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
@@ -30,7 +30,6 @@ use polkadot_primitives::v1::{CandidateHash, ValidatorIndex};
 use polkadot_node_primitives::{BlockData, PoV};
 use polkadot_node_network_protocol::request_response::v1;
 use polkadot_node_network_protocol::request_response::Recipient;
-use polkadot_subsystem::messages::AllMessages;
 
 use crate::metrics::Metrics;
 use crate::tests::mock::get_valid_chunk_data;
@@ -300,4 +299,3 @@ fn get_test_running_task() -> (RunningTask, mpsc::Receiver<FromFetchTask>) {
 		rx
 	)
 }
-
diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs
index 8e6f1451c60f8ca24834e267333bc8801c5e5f28..68ebe90ca0b155c293fdd6be08467a00947a1b8b 100644
--- a/polkadot/node/network/availability-distribution/src/requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs
@@ -33,7 +33,8 @@ use futures::{
 use polkadot_node_subsystem_util::runtime::{RuntimeInfo, get_occupied_cores};
 use polkadot_primitives::v1::{CandidateHash, Hash, OccupiedCore};
 use polkadot_subsystem::{
-	messages::AllMessages, ActiveLeavesUpdate, SubsystemContext, ActivatedLeaf,
+	messages::AllMessages,
+	ActiveLeavesUpdate, SubsystemContext, ActivatedLeaf,
 };
 
 use super::{LOG_TARGET, Metrics};
@@ -229,4 +230,3 @@ impl Stream for Requester {
 		}
 	}
 }
-
diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs
index 9a20510eb354d69b90cfe37657c1651f029e357f..e45574b5a6ed42a281eced4462f92f5e073040ab 100644
--- a/polkadot/node/network/availability-distribution/src/responder.rs
+++ b/polkadot/node/network/availability-distribution/src/responder.rs
@@ -24,7 +24,7 @@ use polkadot_node_network_protocol::request_response::{request::IncomingRequest,
 use polkadot_primitives::v1::{CandidateHash, ValidatorIndex};
 use polkadot_node_primitives::{AvailableData, ErasureChunk};
 use polkadot_subsystem::{
-	messages::{AllMessages, AvailabilityStoreMessage},
+	messages::AvailabilityStoreMessage,
 	SubsystemContext, jaeger,
 };
 
@@ -158,9 +158,9 @@ where
 	Context: SubsystemContext,
 {
 	let (tx, rx) = oneshot::channel();
-	ctx.send_message(AllMessages::AvailabilityStore(
+	ctx.send_message(
 		AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx),
-	))
+	)
 	.await;
 
 	let result = rx.await.map_err(|e| {
@@ -185,9 +185,9 @@ where
 	Context: SubsystemContext,
 {
 	let (tx, rx) = oneshot::channel();
-	ctx.send_message(AllMessages::AvailabilityStore(
+	ctx.send_message(
 		AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx),
-	))
+	)
 	.await;
 
 	let result = rx.await.map_err(|e| NonFatal::QueryAvailableDataResponseChannel(e))?;
diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs
index ccbeba620472eb9386070ef5a0f3b9fe49c63729..b164675083d10c51825dd57bf0099df3a026acf6 100644
--- a/polkadot/node/network/availability-recovery/src/lib.rs
+++ b/polkadot/node/network/availability-recovery/src/lib.rs
@@ -34,12 +34,13 @@ use polkadot_primitives::v1::{
 };
 use polkadot_node_primitives::{ErasureChunk, AvailableData};
 use polkadot_subsystem::{
-	SubsystemContext, SubsystemResult, SubsystemError, Subsystem, SpawnedSubsystem, FromOverseer,
+	overseer::{self, Subsystem},
+	SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, FromOverseer,
 	OverseerSignal, ActiveLeavesUpdate, SubsystemSender,
 	errors::RecoveryError,
 	jaeger,
 	messages::{
-		AvailabilityStoreMessage, AvailabilityRecoveryMessage, AllMessages, NetworkBridgeMessage,
+		AvailabilityStoreMessage, AvailabilityRecoveryMessage, NetworkBridgeMessage,
 	},
 };
 use polkadot_node_network_protocol::{
@@ -573,10 +574,12 @@ impl Default for State {
 	}
 }
 
-impl<C> Subsystem<C> for AvailabilityRecoverySubsystem
-	where C: SubsystemContext<Message = AvailabilityRecoveryMessage>
+impl<Context> Subsystem<Context, SubsystemError> for AvailabilityRecoverySubsystem
+where
+	Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
 {
-	fn start(self, ctx: C) -> SpawnedSubsystem {
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = self.run(ctx)
 			.map_err(|e| SubsystemError::with_origin("availability-recovery", e))
 			.boxed();
@@ -609,14 +612,18 @@ async fn handle_signal(
 }
 
 /// Machinery around launching interactions into the background.
-async fn launch_interaction(
+async fn launch_interaction<Context>(
 	state: &mut State,
-	ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
+	ctx: &mut Context,
 	session_info: SessionInfo,
 	receipt: CandidateReceipt,
 	backing_group: Option<GroupIndex>,
 	response_sender: oneshot::Sender<Result<AvailableData, RecoveryError>>,
-) -> error::Result<()> {
+) -> error::Result<()>
+where
+	Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
+{
 	let candidate_hash = receipt.hash();
 
 	let params = InteractionParams {
@@ -662,14 +669,18 @@ async fn launch_interaction(
 }
 
 /// Handles an availability recovery request.
-async fn handle_recover(
+async fn handle_recover<Context>(
 	state: &mut State,
-	ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
+	ctx: &mut Context,
 	receipt: CandidateReceipt,
 	session_index: SessionIndex,
 	backing_group: Option<GroupIndex>,
 	response_sender: oneshot::Sender<Result<AvailableData, RecoveryError>>,
-) -> error::Result<()> {
+) -> error::Result<()>
+where
+	Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
+{
 	let candidate_hash = receipt.hash();
 
 	let span = jaeger::Span::new(candidate_hash, "availbility-recovery")
@@ -724,14 +735,18 @@ async fn handle_recover(
 }
 
 /// Queries a chunk from av-store.
-async fn query_full_data(
-	ctx: &mut impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
+async fn query_full_data<Context>(
+	ctx: &mut Context,
 	candidate_hash: CandidateHash,
-) -> error::Result<Option<AvailableData>> {
+) -> error::Result<Option<AvailableData>>
+where
+	Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
+	Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
+{
 	let (tx, rx) = oneshot::channel();
-	ctx.send_message(AllMessages::AvailabilityStore(
+	ctx.send_message(
 		AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx),
-	)).await;
+	).await;
 
 	Ok(rx.await.map_err(error::Error::CanceledQueryFullData)?)
 }
@@ -747,10 +762,14 @@ impl AvailabilityRecoverySubsystem {
 		Self { fast_path: false }
 	}
 
-	async fn run(
+	async fn run<Context>(
 		self,
-		mut ctx: impl SubsystemContext<Message = AvailabilityRecoveryMessage>,
-	) -> SubsystemResult<()> {
+		mut ctx: Context,
+	) -> SubsystemResult<()>
+	where
+		Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
+		Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
+	{
 		let mut state = State::default();
 
 		loop {
diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs
index 81d9a53c9a5062f9c44dbc3b040632c48bcf6646..d98de4f2bb069fb526a1fee22cac69f351c4f026 100644
--- a/polkadot/node/network/availability-recovery/src/tests.rs
+++ b/polkadot/node/network/availability-recovery/src/tests.rs
@@ -34,7 +34,7 @@ use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_subsystem_testhelpers as test_helpers;
 use polkadot_subsystem::{
-	messages::{RuntimeApiMessage, RuntimeApiRequest}, jaeger, ActivatedLeaf, LeafStatus,
+	messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, jaeger, ActivatedLeaf, LeafStatus,
 };
 
 type VirtualOverseer = test_helpers::TestSubsystemContextHandle<AvailabilityRecoveryMessage>;
diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs
index 55332389e0c90e2f830a4bc2dd9b50c1d7885b43..e339e6dc66f2ac35abb00f8dc78f33d7b636e075 100644
--- a/polkadot/node/network/bitfield-distribution/src/lib.rs
+++ b/polkadot/node/network/bitfield-distribution/src/lib.rs
@@ -26,9 +26,10 @@ use futures::{channel::oneshot, FutureExt};
 
 use polkadot_subsystem::messages::*;
 use polkadot_subsystem::{
-	PerLeafSpan, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem,
-	SubsystemContext, SubsystemResult,
+	PerLeafSpan, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem,
+	SubsystemContext, SubsystemResult, SubsystemError,
 	jaeger,
+	overseer,
 };
 use polkadot_node_subsystem_util::{
 	metrics::{self, prometheus},
@@ -162,6 +163,7 @@ impl BitfieldDistribution {
 	async fn run<Context>(self, mut ctx: Context)
 	where
 		Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+		Context: overseer::SubsystemContext<Message = BitfieldDistributionMessage>,
 	{
 		// work: process incoming messages from the overseer and process accordingly.
 		let mut state = ProtocolState::default();
@@ -250,9 +252,9 @@ where
 {
 	tracing::trace!(target: LOG_TARGET, ?rep, peer_id = %peer, "reputation change");
 
-	ctx.send_message(AllMessages::NetworkBridge(
+	ctx.send_message(
 		NetworkBridgeMessage::ReportPeer(peer, rep),
-	))
+	)
 	.await
 }
 
@@ -328,7 +330,7 @@ where
 
 	let _span = span.child("provisionable");
 	// notify the overseer about a new and valid signed bitfield
-	ctx.send_message(AllMessages::Provisioner(
+	ctx.send_message(
 		ProvisionerMessage::ProvisionableData(
 			message.relay_parent,
 			ProvisionableData::Bitfield(
@@ -336,7 +338,7 @@ where
 				message.signed_availability.clone(),
 			),
 		),
-	))
+	)
 	.await;
 
 	drop(_span);
@@ -383,12 +385,12 @@ where
 		);
 	} else {
 		let _span = span.child("gossip");
-		ctx.send_message(AllMessages::NetworkBridge(
+		ctx.send_message(
 			NetworkBridgeMessage::SendValidationMessage(
 				interested_peers,
 				message.into_validation_protocol(),
 			),
-		))
+		)
 		.await;
 	}
 }
@@ -687,19 +689,20 @@ where
 		.or_default()
 		.insert(validator.clone());
 
-	ctx.send_message(AllMessages::NetworkBridge(
+	ctx.send_message(
 		NetworkBridgeMessage::SendValidationMessage(
 			vec![dest],
 			message.into_validation_protocol(),
 		),
-	)).await;
+	).await;
 }
 
-impl<C> Subsystem<C> for BitfieldDistribution
+impl<Context> overseer::Subsystem<Context, SubsystemError> for BitfieldDistribution
 where
-	C: SubsystemContext<Message = BitfieldDistributionMessage> + Sync + Send,
+	Context: SubsystemContext<Message = BitfieldDistributionMessage>,
+	Context: overseer::SubsystemContext<Message = BitfieldDistributionMessage>,
 {
-	fn start(self, ctx: C) -> SpawnedSubsystem {
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = self.run(ctx)
 			.map(|_| Ok(()))
 			.boxed();
@@ -722,18 +725,17 @@ where
 	let (validators_tx, validators_rx) = oneshot::channel();
 	let (session_tx, session_rx) = oneshot::channel();
 
-	let query_validators = AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+	// query validators
+	ctx.send_message(RuntimeApiMessage::Request(
 		relay_parent.clone(),
 		RuntimeApiRequest::Validators(validators_tx),
-	));
+	)).await;
 
-	let query_signing = AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+	// query signing context
+	ctx.send_message(RuntimeApiMessage::Request(
 		relay_parent.clone(),
 		RuntimeApiRequest::SessionIndexForChild(session_tx),
-	));
-
-	ctx.send_messages(std::iter::once(query_validators).chain(std::iter::once(query_signing)))
-		.await;
+	)).await;
 
 	match (validators_rx.await?, session_rx.await?) {
 		(Ok(v), Ok(s)) => Ok(Some((
@@ -837,4 +839,3 @@ impl metrics::Metrics for Metrics {
 		Ok(Metrics(Some(metrics)))
 	}
 }
-
diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml
index d9f8fedc19f7c0ebb9f90a975a940cd42979f5dd..84d8aaba0e1a13ad2213d5e88d29dd45802f07a0 100644
--- a/polkadot/node/network/bridge/Cargo.toml
+++ b/polkadot/node/network/bridge/Cargo.toml
@@ -14,6 +14,7 @@ sc-authority-discovery = { git = "https://github.com/paritytech/substrate", bran
 sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
 polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+polkadot-overseer = { path = "../../overseer" }
 polkadot-node-network-protocol = { path = "../protocol" }
 polkadot-node-subsystem-util = { path = "../../subsystem-util"}
 strum = "0.20.0"
diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs
index 87c6fd9494b04bce8e259d11a24a78a455c26925..101ec01466a53cd7461cad95f7ba11ee0633eab8 100644
--- a/polkadot/node/network/bridge/src/lib.rs
+++ b/polkadot/node/network/bridge/src/lib.rs
@@ -27,14 +27,23 @@ use futures::stream::BoxStream;
 use sc_network::Event as NetworkEvent;
 use sp_consensus::SyncOracle;
 
-use polkadot_subsystem::{
-	ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem,
-	Subsystem, SubsystemContext, SubsystemError, SubsystemResult, SubsystemSender,
-	messages::StatementDistributionMessage
+use polkadot_overseer::gen::{
+	Subsystem,
+	OverseerError,
 };
-use polkadot_subsystem::messages::{
-	NetworkBridgeMessage, AllMessages,
-	CollatorProtocolMessage, NetworkBridgeEvent,
+use polkadot_subsystem::{
+	overseer,
+	OverseerSignal,
+	FromOverseer,
+	SpawnedSubsystem,
+	SubsystemContext,
+	SubsystemSender,
+	errors::{SubsystemError, SubsystemResult},
+	ActivatedLeaf, ActiveLeavesUpdate,
+	messages::{
+		AllMessages, StatementDistributionMessage,
+		NetworkBridgeMessage, CollatorProtocolMessage, NetworkBridgeEvent,
+	},
 };
 use polkadot_primitives::v1::{Hash, BlockNumber};
 use polkadot_node_network_protocol::{
@@ -292,11 +301,11 @@ impl<N, AD> NetworkBridge<N, AD> {
 	}
 }
 
-impl<Net, AD, Context> Subsystem<Context> for NetworkBridge<Net, AD>
+impl<Net, AD, Context> Subsystem<Context, SubsystemError> for NetworkBridge<Net, AD>
 	where
 		Net: Network + Sync,
 		AD: validator_discovery::AuthorityDiscovery,
-		Context: SubsystemContext<Message=NetworkBridgeMessage>,
+		Context: SubsystemContext<Message = NetworkBridgeMessage> + overseer::SubsystemContext<Message = NetworkBridgeMessage>,
 {
 	fn start(mut self, ctx: Context) -> SpawnedSubsystem {
 		// The stream of networking events has to be created at initialization, otherwise the
@@ -325,7 +334,7 @@ struct PeerData {
 #[derive(Debug)]
 enum UnexpectedAbort {
 	/// Received error from overseer:
-	SubsystemError(polkadot_subsystem::SubsystemError),
+	SubsystemError(SubsystemError),
 	/// The stream of incoming events concluded.
 	EventStreamConcluded,
 	/// The stream of incoming requests concluded.
@@ -338,6 +347,12 @@ impl From<SubsystemError> for UnexpectedAbort {
 	}
 }
 
+impl From<OverseerError> for UnexpectedAbort {
+	fn from(e: OverseerError) -> Self {
+		UnexpectedAbort::SubsystemError(SubsystemError::from(e))
+	}
+}
+
 #[derive(Default, Clone)]
 struct Shared(Arc<Mutex<SharedInner>>);
 
@@ -363,6 +378,7 @@ async fn handle_subsystem_messages<Context, N, AD>(
 ) -> Result<(), UnexpectedAbort>
 where
 	Context: SubsystemContext<Message = NetworkBridgeMessage>,
+	Context: overseer::SubsystemContext<Message = NetworkBridgeMessage>,
 	N: Network,
 	AD: validator_discovery::AuthorityDiscovery,
 {
@@ -854,14 +870,15 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
 /// #fn is_send<T: Send>();
 /// #is_send::<parking_lot::MutexGuard<'static, ()>();
 /// ```
-async fn run_network<N, AD>(
+async fn run_network<N, AD, Context>(
 	bridge: NetworkBridge<N, AD>,
-	mut ctx: impl SubsystemContext<Message=NetworkBridgeMessage>,
+	mut ctx: Context,
 	network_stream: BoxStream<'static, NetworkEvent>,
 ) -> SubsystemResult<()>
 where
 	N: Network,
 	AD: validator_discovery::AuthorityDiscovery,
+	Context: SubsystemContext<Message=NetworkBridgeMessage> + overseer::SubsystemContext<Message=NetworkBridgeMessage>,
 {
 	let shared = Shared::default();
 
@@ -877,7 +894,7 @@ where
 		.get_statement_fetching()
 		.expect("Gets initialized, must be `Some` on startup. qed.");
 
-	let (remote, network_event_handler) = handle_network_messages(
+	let (remote, network_event_handler) = handle_network_messages::<>(
 		ctx.sender().clone(),
 		network_service.clone(),
 		network_stream,
@@ -889,9 +906,9 @@ where
 
 	ctx.spawn("network-bridge-network-worker", Box::pin(remote))?;
 
-	ctx.send_message(AllMessages::StatementDistribution(
+	ctx.send_message(
 		StatementDistributionMessage::StatementFetchingReceiver(statement_receiver)
-	)).await;
+	).await;
 
 	let subsystem_event_handler = handle_subsystem_messages(
 		ctx,
@@ -952,7 +969,7 @@ fn construct_view(live_heads: impl DoubleEndedIterator<Item = Hash>, finalized_n
 
 fn update_our_view(
 	net: &mut impl Network,
-	ctx: &mut impl SubsystemContext<Message = NetworkBridgeMessage>,
+	ctx: &mut impl SubsystemContext<Message=NetworkBridgeMessage, AllMessages=AllMessages>,
 	live_heads: &[ActivatedLeaf],
 	shared: &Shared,
 	finalized_number: BlockNumber,
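
With the `From<OverseerError>` impl added above, both the legacy `SubsystemError` and the generated `OverseerError` collapse into `UnexpectedAbort::SubsystemError`, so fallible calls from either API share a single `?` path in the bridge's run loops. A small sketch of what that buys (the function name is hypothetical):

	fn funnel_errors_sketch(
		subsystem_res: Result<(), SubsystemError>,
		overseer_res: Result<(), OverseerError>,
	) -> Result<(), UnexpectedAbort> {
		subsystem_res?; // via the existing `From<SubsystemError> for UnexpectedAbort`
		overseer_res?;  // via the `From<OverseerError>` impl introduced above
		Ok(())
	}
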
diff --git a/polkadot/node/network/bridge/src/multiplexer.rs b/polkadot/node/network/bridge/src/multiplexer.rs
index ad65309d3eea3993c8f87f5294156ec10b84d7d4..b88cc414695932410375b8e7a9ba6ef163a80353 100644
--- a/polkadot/node/network/bridge/src/multiplexer.rs
+++ b/polkadot/node/network/bridge/src/multiplexer.rs
@@ -29,7 +29,7 @@ use sc_network::PeerId;
 use polkadot_node_network_protocol::request_response::{
 	request::IncomingRequest, v1, Protocol, RequestResponseConfig,
 };
-use polkadot_subsystem::messages::AllMessages;
+use polkadot_overseer::AllMessages;
 
 /// Multiplex incoming network requests.
 ///
@@ -151,28 +151,28 @@ fn multiplex_single(
 	}: network::IncomingRequest,
 ) -> Result<AllMessages, RequestMultiplexError> {
 	let r = match p {
-		Protocol::ChunkFetching => From::from(IncomingRequest::new(
+		Protocol::ChunkFetching => AllMessages::from(IncomingRequest::new(
 			peer,
 			decode_with_peer::<v1::ChunkFetchingRequest>(peer, payload)?,
 			pending_response,
 		)),
-		Protocol::CollationFetching => From::from(IncomingRequest::new(
+		Protocol::CollationFetching => AllMessages::from(IncomingRequest::new(
 			peer,
 			decode_with_peer::<v1::CollationFetchingRequest>(peer, payload)?,
 			pending_response,
 		)),
-		Protocol::PoVFetching => From::from(IncomingRequest::new(
+		Protocol::PoVFetching => AllMessages::from(IncomingRequest::new(
 			peer,
 			decode_with_peer::<v1::PoVFetchingRequest>(peer, payload)?,
 			pending_response,
 		)),
-		Protocol::AvailableDataFetching => From::from(IncomingRequest::new(
+		Protocol::AvailableDataFetching => AllMessages::from(IncomingRequest::new(
 			peer,
 			decode_with_peer::<v1::AvailableDataFetchingRequest>(peer, payload)?,
 			pending_response,
 		)),
 		Protocol::StatementFetching => {
-			panic!("Statement fetching requests are handled directly. qed.");
+			unreachable!("Statement fetching requests are handled directly. qed.");
 		}
 	};
 	Ok(r)
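
Spelling the conversion as `AllMessages::from(..)` instead of the bare `From::from(..)` names the inference target at every arm, and `unreachable!` records the invariant that statement-fetching requests are split off before they ever reach the multiplexer. The same effect could be had by naming the type once on the binding; a sketch, where `decoded` stands for an already-decoded `v1::PoVFetchingRequest` (hypothetical):

	let msg: AllMessages = IncomingRequest::new(peer, decoded, pending_response).into();
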
diff --git a/polkadot/node/network/bridge/src/tests.rs b/polkadot/node/network/bridge/src/tests.rs
index 48296fb94f05aaf3ca0266dba12fef1b6c1a4a41..4fcb57d7554ee3c2e94944c0c3eaf6612c59dc36 100644
--- a/polkadot/node/network/bridge/src/tests.rs
+++ b/polkadot/node/network/bridge/src/tests.rs
@@ -1260,6 +1260,7 @@ fn spread_event_to_subsystems_is_up_to_date() {
 	let mut cnt = 0_usize;
 	for msg in AllMessages::dispatch_iter(NetworkBridgeEvent::PeerDisconnected(PeerId::random())) {
 		match msg {
+			AllMessages::Empty => unreachable!("Nobody cares about the dummy"),
 			AllMessages::CandidateValidation(_) => unreachable!("Not interested in network events"),
 			AllMessages::CandidateBacking(_) => unreachable!("Not interested in network events"),
 			AllMessages::ChainApi(_) => unreachable!("Not interested in network events"),
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index 4cc52b33fde7a723a5399018ccc305122b684be4..643429b9c060f0aaae699070a34f32c75bdabd31 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -24,9 +24,10 @@ use polkadot_primitives::v1::{
 	Id as ParaId,
 };
 use polkadot_subsystem::{
+	overseer,
 	FromOverseer, OverseerSignal, PerLeafSpan, SubsystemContext, jaeger,
 	messages::{
-		AllMessages, CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeMessage,
+		CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeMessage,
 	},
 };
 use polkadot_node_network_protocol::{
@@ -301,15 +302,19 @@ impl State {
 /// or the relay-parent isn't in the active-leaves set, we ignore the message
 /// as it must be invalid in that case - although this indicates a logic error
 /// elsewhere in the node.
-async fn distribute_collation(
-	ctx: &mut impl SubsystemContext,
+async fn distribute_collation<Context>(
+	ctx: &mut Context,
 	runtime: &mut RuntimeInfo,
 	state: &mut State,
 	id: ParaId,
 	receipt: CandidateReceipt,
 	pov: PoV,
 	result_sender: Option<oneshot::Sender<SignedFullStatement>>,
-) -> Result<()> {
+) -> Result<()>
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	let relay_parent = receipt.descriptor.relay_parent;
 
 	// This collation is not in the active-leaves set.
@@ -400,11 +405,15 @@ async fn distribute_collation(
 
 /// Get the Id of the Core that is assigned to the para being collated on if any
 /// and the total number of cores.
-async fn determine_core(
-	ctx: &mut impl SubsystemContext,
+async fn determine_core<Context>(
+	ctx: &mut Context,
 	para_id: ParaId,
 	relay_parent: Hash,
-) -> Result<Option<(CoreIndex, usize)>> {
+) -> Result<Option<(CoreIndex, usize)>>
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	let cores = get_availability_cores(ctx, relay_parent).await?;
 
 	for (idx, core) in cores.iter().enumerate() {
@@ -430,13 +439,17 @@ struct GroupValidators {
 /// Figure out current and next group of validators assigned to the para being collated on.
 ///
 /// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`.
-async fn determine_our_validators(
-	ctx: &mut impl SubsystemContext,
+async fn determine_our_validators<Context>(
+	ctx: &mut Context,
 	runtime: &mut RuntimeInfo,
 	core_index: CoreIndex,
 	cores: usize,
 	relay_parent: Hash,
-) -> Result<(GroupValidators, GroupValidators)> {
+) -> Result<(GroupValidators, GroupValidators)>
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	let session_index = runtime.get_session_index(ctx, relay_parent).await?;
 	let info = &runtime.get_session_info_by_index(ctx, relay_parent, session_index)
 		.await?
@@ -469,11 +482,15 @@ async fn determine_our_validators(
 }
 
 /// Issue a `Declare` collation message to the given `peer`.
-async fn declare(
-	ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
+async fn declare<Context>(
+	ctx: &mut Context,
 	state: &mut State,
 	peer: PeerId,
-) {
+)
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	let declare_signature_payload = protocol_v1::declare_signature_payload(&state.local_peer_id);
 
 	if let Some(para_id) = state.collating_on {
@@ -483,39 +500,47 @@ async fn declare(
 			state.collator_pair.sign(&declare_signature_payload),
 		);
 
-		ctx.send_message(AllMessages::NetworkBridge(
+		ctx.send_message(
 			NetworkBridgeMessage::SendCollationMessage(
 				vec![peer],
 				protocol_v1::CollationProtocol::CollatorProtocol(wire_message),
 			)
-		)).await;
+		).await;
 	}
 }
 
 /// Issue a connection request to a set of validators and
 /// revoke the previous connection request.
-async fn connect_to_validators(
-	ctx: &mut impl SubsystemContext,
+async fn connect_to_validators<Context>(
+	ctx: &mut Context,
 	validator_ids: Vec<AuthorityDiscoveryId>,
-) {
+)
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	// ignore address resolution failure
 	// will reissue a new request on new collation
 	let (failed, _) = oneshot::channel();
-	ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::ConnectToValidators {
+	ctx.send_message(NetworkBridgeMessage::ConnectToValidators {
 		validator_ids, peer_set: PeerSet::Collation, failed,
-	})).await;
+	}).await;
 }
 
 /// Advertise collation to the given `peer`.
 ///
 /// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is
 /// set as validator for our para at the given `relay_parent`.
-async fn advertise_collation(
-	ctx: &mut impl SubsystemContext,
+async fn advertise_collation<Context>(
+	ctx: &mut Context,
 	state: &mut State,
 	relay_parent: Hash,
 	peer: PeerId,
-) {
+)
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	let should_advertise = state.our_validators_groups
 		.get(&relay_parent)
 		.map(|g| g.should_advertise_to(&state.peer_ids, &peer))
@@ -555,12 +580,12 @@ async fn advertise_collation(
 		relay_parent,
 	);
 
-	ctx.send_message(AllMessages::NetworkBridge(
+	ctx.send_message(
 		NetworkBridgeMessage::SendCollationMessage(
 			vec![peer.clone()],
 			protocol_v1::CollationProtocol::CollatorProtocol(wire_message),
 		)
-	)).await;
+	).await;
 
 	if let Some(validators) = state.our_validators_groups.get_mut(&relay_parent) {
 		validators.advertised_to_peer(&state.peer_ids, &peer);
@@ -570,12 +595,16 @@ async fn advertise_collation(
 }
 
 /// The main incoming message dispatching switch.
-async fn process_msg(
-	ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
+async fn process_msg<Context>(
+	ctx: &mut Context,
 	runtime: &mut RuntimeInfo,
 	state: &mut State,
 	msg: CollatorProtocolMessage,
-) -> Result<()> {
+) -> Result<()>
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	use CollatorProtocolMessage::*;
 
 	let _timer = state.metrics.time_process_msg();
@@ -718,13 +747,17 @@ async fn send_collation(
 }
 
 /// A networking messages switch.
-async fn handle_incoming_peer_message(
-	ctx: &mut impl SubsystemContext,
+async fn handle_incoming_peer_message<Context>(
+	ctx: &mut Context,
 	runtime: &mut RuntimeInfo,
 	state: &mut State,
 	origin: PeerId,
 	msg: protocol_v1::CollatorProtocolMessage,
-) -> Result<()> {
+) -> Result<()>
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	use protocol_v1::CollatorProtocolMessage::*;
 
 	match msg {
@@ -737,7 +770,7 @@ async fn handle_incoming_peer_message(
 
 			// If we are declared to, this is another collator, and we should disconnect.
 			ctx.send_message(
-				NetworkBridgeMessage::DisconnectPeer(origin, PeerSet::Collation).into()
+				NetworkBridgeMessage::DisconnectPeer(origin, PeerSet::Collation)
 			).await;
 		}
 		AdvertiseCollation(_) => {
@@ -748,12 +781,12 @@ async fn handle_incoming_peer_message(
 			);
 
 			ctx.send_message(
-				NetworkBridgeMessage::ReportPeer(origin.clone(), COST_UNEXPECTED_MESSAGE).into()
+				NetworkBridgeMessage::ReportPeer(origin.clone(), COST_UNEXPECTED_MESSAGE)
 			).await;
 
 			// If we are advertised to, this is another collator, and we should disconnect.
 			ctx.send_message(
-				NetworkBridgeMessage::DisconnectPeer(origin, PeerSet::Collation).into()
+				NetworkBridgeMessage::DisconnectPeer(origin, PeerSet::Collation)
 			).await;
 		}
 		CollationSeconded(relay_parent, statement) => {
@@ -789,12 +822,16 @@ async fn handle_incoming_peer_message(
 }
 
 /// Our view has changed.
-async fn handle_peer_view_change(
-	ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
+async fn handle_peer_view_change<Context>(
+	ctx: &mut Context,
 	state: &mut State,
 	peer_id: PeerId,
 	view: View,
-) {
+)
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	let current = state.peer_views.entry(peer_id.clone()).or_default();
 
 	let added: Vec<Hash> = view.difference(&*current).cloned().collect();
@@ -807,12 +844,16 @@ async fn handle_peer_view_change(
 }
 
 /// Bridge messages switch.
-async fn handle_network_msg(
-	ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
+async fn handle_network_msg<Context>(
+	ctx: &mut Context,
 	runtime: &mut RuntimeInfo,
 	state: &mut State,
 	bridge_message: NetworkBridgeEvent<protocol_v1::CollatorProtocolMessage>,
-) -> Result<()> {
+) -> Result<()>
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+{
 	use NetworkBridgeEvent::*;
 
 	match bridge_message {
@@ -917,13 +958,16 @@ async fn handle_our_view_change(
 }
 
 /// The collator protocol collator side main loop.
-pub(crate) async fn run(
-	mut ctx: impl SubsystemContext<Message = CollatorProtocolMessage>,
+pub(crate) async fn run<Context>(
+	mut ctx: Context,
 	local_peer_id: PeerId,
 	collator_pair: CollatorPair,
 	metrics: Metrics,
-) -> Result<()> {
-	use FromOverseer::*;
+) -> Result<()>
+where
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>
+{
 	use OverseerSignal::*;
 
 	let mut state = State::new(local_peer_id, collator_pair, metrics);
@@ -932,15 +976,15 @@ pub(crate) async fn run(
 	loop {
 		select! {
 			msg = ctx.recv().fuse() => match msg.map_err(Fatal::SubsystemReceive)? {
-				Communication { msg } => {
+				FromOverseer::Communication { msg } => {
 					log_error(
 						process_msg(&mut ctx, &mut runtime, &mut state, msg).await,
 						"Failed to process message"
 					)?;
 				},
-				Signal(ActiveLeaves(_update)) => {}
-				Signal(BlockFinalized(..)) => {}
-				Signal(Conclude) => return Ok(()),
+				FromOverseer::Signal(ActiveLeaves(_update)) => {}
+				FromOverseer::Signal(BlockFinalized(..)) => {}
+				FromOverseer::Signal(Conclude) => return Ok(()),
 			},
 			relay_parent = state.active_collation_fetches.select_next_some() => {
 				let next = if let Some(waiting) = state.waiting_collation_fetches.get_mut(&relay_parent) {
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests.rs
index a716dbc16a78da013fce50f33d419c219164f600..96319d8c151be609930e619c9d7726bf3651e7a6 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/tests.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/tests.rs
@@ -39,7 +39,7 @@ use polkadot_primitives::v1::{
 use polkadot_node_primitives::BlockData;
 use polkadot_subsystem::{
 	jaeger,
-	messages::{RuntimeApiMessage, RuntimeApiRequest},
+	messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest},
 	ActiveLeavesUpdate, ActivatedLeaf, LeafStatus,
 };
 use polkadot_subsystem_testhelpers as test_helpers;
diff --git a/polkadot/node/network/collator-protocol/src/error.rs b/polkadot/node/network/collator-protocol/src/error.rs
index 37f8df0731b22636369e7d19a2e62c3217a2e3c5..4fa6ac1959ccb1ed20ee679e7c142f3f1e1eb7aa 100644
--- a/polkadot/node/network/collator-protocol/src/error.rs
+++ b/polkadot/node/network/collator-protocol/src/error.rs
@@ -18,7 +18,7 @@
 //! Error handling related code and Error/Result definitions.
 
 use polkadot_node_primitives::UncheckedSignedFullStatement;
-use polkadot_subsystem::SubsystemError;
+use polkadot_subsystem::errors::SubsystemError;
 use thiserror::Error;
 
 use polkadot_node_subsystem_util::{Fault, runtime, unwrap_non_fatal};
diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs
index c958289133604c8b999b1cfaff3c94fb9b6fa764..96af19aa1e58cf92496005754f863c68ec046e19 100644
--- a/polkadot/node/network/collator-protocol/src/lib.rs
+++ b/polkadot/node/network/collator-protocol/src/lib.rs
@@ -28,9 +28,16 @@ use sp_keystore::SyncCryptoStorePtr;
 
 use polkadot_node_network_protocol::{PeerId, UnifiedReputationChange as Rep};
 use polkadot_primitives::v1::CollatorPair;
+
 use polkadot_subsystem::{
-	messages::{AllMessages, CollatorProtocolMessage, NetworkBridgeMessage},
-	SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemError,
+	SpawnedSubsystem,
+	SubsystemContext,
+	SubsystemSender,
+	overseer,
+	messages::{
+		CollatorProtocolMessage, NetworkBridgeMessage,
+	},
+	errors::SubsystemError,
 };
 
 mod error;
@@ -92,7 +99,8 @@ impl CollatorProtocolSubsystem {
 
 	async fn run<Context>(self, ctx: Context) -> Result<()>
 	where
-		Context: SubsystemContext<Message = CollatorProtocolMessage>,
+		Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+		Context: SubsystemContext<Message=CollatorProtocolMessage>,
 	{
 		match self.protocol_side {
 			ProtocolSide::Validator { keystore, eviction_policy, metrics } => validator_side::run(
@@ -111,9 +119,11 @@ impl CollatorProtocolSubsystem {
 	}
 }
 
-impl<Context> Subsystem<Context> for CollatorProtocolSubsystem
+impl<Context> overseer::Subsystem<Context, SubsystemError> for CollatorProtocolSubsystem
 where
-	Context: SubsystemContext<Message = CollatorProtocolMessage> + Sync + Send,
+	Context: SubsystemContext<Message = CollatorProtocolMessage>,
+	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
+	<Context as SubsystemContext>::Sender: SubsystemSender,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = self
@@ -140,7 +150,7 @@ where
 		"reputation change for peer",
 	);
 
-	ctx.send_message(AllMessages::NetworkBridge(
+	ctx.send_message(
 		NetworkBridgeMessage::ReportPeer(peer, rep),
-	)).await;
+	).await;
 }
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index 128733088ba797e4925258dd2e2ddefd7bf6b02b..bd6d9cc86ba6ee88e3dc02b0d4e12dd8c8264cf8 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -40,9 +40,10 @@ use polkadot_node_primitives::{SignedFullStatement, PoV};
 use polkadot_node_subsystem_util::metrics::{self, prometheus};
 use polkadot_primitives::v1::{CandidateReceipt, CollatorId, Hash, Id as ParaId};
 use polkadot_subsystem::{
+	overseer,
 	jaeger,
 	messages::{
-		AllMessages, CollatorProtocolMessage, IfDisconnected,
+		CollatorProtocolMessage, IfDisconnected,
 		NetworkBridgeEvent, NetworkBridgeMessage, CandidateBackingMessage,
 	},
 	FromOverseer, OverseerSignal, PerLeafSpan, SubsystemContext, SubsystemSender,
@@ -583,19 +584,27 @@ fn collator_peer_id(
 		)
 }
 
-async fn disconnect_peer(ctx: &mut impl SubsystemContext, peer_id: PeerId) {
+async fn disconnect_peer<Context>(ctx: &mut Context, peer_id: PeerId)
+where
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
+{
 	ctx.send_message(
-		NetworkBridgeMessage::DisconnectPeer(peer_id, PeerSet::Collation).into()
+		NetworkBridgeMessage::DisconnectPeer(peer_id, PeerSet::Collation)
 	).await
 }
 
 /// Another subsystem has requested to fetch collations on a particular leaf for some para.
-async fn fetch_collation(
-	ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
+async fn fetch_collation<Context>(
+	ctx: &mut Context,
 	state: &mut State,
 	pc: PendingCollation,
 	id: CollatorId,
-) {
+)
+where
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
+{
 	let (tx, rx) = oneshot::channel();
 
 	let PendingCollation { relay_parent, para_id, peer_id, .. } = pc;
@@ -627,7 +636,8 @@ async fn note_good_collation<Context>(
 	id: CollatorId,
 )
 where
-	Context: SubsystemContext<Message = CollatorProtocolMessage>
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
 {
 	if let Some(peer_id) = collator_peer_id(peer_data, &id) {
 		modify_reputation(ctx, peer_id, BENEFIT_NOTIFY_GOOD).await;
@@ -635,19 +645,23 @@ where
 }
 
 /// Notify a collator that its collation got seconded.
-async fn notify_collation_seconded(
-	ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
+async fn notify_collation_seconded<Context>(
+	ctx: &mut Context,
 	peer_id: PeerId,
 	relay_parent: Hash,
 	statement: SignedFullStatement,
-) {
+)
+where
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
+{
 	let wire_message = protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement.into());
-	ctx.send_message(AllMessages::NetworkBridge(
+	ctx.send_message(
 		NetworkBridgeMessage::SendCollationMessage(
 			vec![peer_id],
 			protocol_v1::CollationProtocol::CollatorProtocol(wire_message),
 		)
-	)).await;
+	).await;
 
 	modify_reputation(ctx, peer_id, BENEFIT_NOTIFY_GOOD).await;
 }
@@ -684,7 +698,8 @@ async fn request_collation<Context>(
 	result: oneshot::Sender<(CandidateReceipt, PoV)>,
 )
 where
-	Context: SubsystemContext<Message = CollatorProtocolMessage>
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
 {
 	if !state.view.contains(&relay_parent) {
 		tracing::debug!(
@@ -737,8 +752,8 @@ where
 		"Requesting collation",
 	);
 
-	ctx.send_message(AllMessages::NetworkBridge(
-		NetworkBridgeMessage::SendRequests(vec![requests], IfDisconnected::ImmediateError))
+	ctx.send_message(
+		NetworkBridgeMessage::SendRequests(vec![requests], IfDisconnected::ImmediateError)
 	).await;
 }
 
@@ -750,7 +765,8 @@ async fn process_incoming_peer_message<Context>(
 	msg: protocol_v1::CollatorProtocolMessage,
 )
 where
-	Context: SubsystemContext<Message = CollatorProtocolMessage>
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
 {
 	use protocol_v1::CollatorProtocolMessage::*;
 	use sp_runtime::traits::AppVerify;
@@ -897,12 +913,16 @@ async fn remove_relay_parent(
 }
 
 /// Our view has changed.
-async fn handle_our_view_change(
-	ctx: &mut impl SubsystemContext,
+async fn handle_our_view_change<Context>(
+	ctx: &mut Context,
 	state: &mut State,
 	keystore: &SyncCryptoStorePtr,
 	view: OurView,
-) -> Result<()> {
+) -> Result<()>
+where
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
+{
 	let old_view = std::mem::replace(&mut state.view, view);
 
 	let added: HashMap<Hash, Arc<jaeger::Span>> = state.view
@@ -955,7 +975,8 @@ async fn handle_network_msg<Context>(
 	bridge_message: NetworkBridgeEvent<protocol_v1::CollatorProtocolMessage>,
 ) -> Result<()>
 where
-	Context: SubsystemContext<Message = CollatorProtocolMessage>
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
 {
 	use NetworkBridgeEvent::*;
 
@@ -993,7 +1014,8 @@ async fn process_msg<Context>(
 	state: &mut State,
 )
 where
-	Context: SubsystemContext<Message = CollatorProtocolMessage>
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
 {
 	use CollatorProtocolMessage::*;
 
@@ -1101,9 +1123,10 @@ pub(crate) async fn run<Context>(
 	eviction_policy: crate::CollatorEvictionPolicy,
 	metrics: Metrics,
 ) -> Result<()>
-	where Context: SubsystemContext<Message = CollatorProtocolMessage>
+where
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
 {
-	use FromOverseer::*;
 	use OverseerSignal::*;
 
 	let mut state = State {
@@ -1122,7 +1145,7 @@ pub(crate) async fn run<Context>(
 		select! {
 			res = ctx.recv().fuse() => {
 				match res {
-					Ok(Communication { msg }) => {
+					Ok(FromOverseer::Communication { msg }) => {
 						tracing::trace!(target: LOG_TARGET, msg = ?msg, "received a message");
 						process_msg(
 							&mut ctx,
@@ -1131,7 +1154,7 @@ pub(crate) async fn run<Context>(
 							&mut state,
 						).await;
 					}
-					Ok(Signal(Conclude)) => break,
+					Ok(FromOverseer::Signal(Conclude)) => break,
 					_ => {},
 				}
 			}
@@ -1159,11 +1182,15 @@ pub(crate) async fn run<Context>(
 }
 
 /// Handle a fetched collation result.
-async fn handle_collation_fetched_result(
-	ctx: &mut impl SubsystemContext<Message = CollatorProtocolMessage>,
+async fn handle_collation_fetched_result<Context>(
+	ctx: &mut Context,
 	state: &mut State,
 	(mut collation_event, res): PendingCollationFetch,
-) {
+)
+where
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
+{
 	// If no prior collation for this relay parent has been seconded, then
 	// memoize the collation_event for that relay_parent, such that we may
 	// notify the collator of their successful second backing
@@ -1204,7 +1231,7 @@ async fn handle_collation_fetched_result(
 				relay_parent.clone(),
 				candidate_receipt,
 				pov,
-			).into()
+			)
 		).await;
 
 		entry.insert(collation_event);
@@ -1221,11 +1248,15 @@ async fn handle_collation_fetched_result(
 // This issues `NetworkBridge` notifications to disconnect from all inactive peers at the
 // earliest possible point. This does not yet clean up any metadata, as that will be done upon
 // receipt of the `PeerDisconnected` event.
-async fn disconnect_inactive_peers(
-	ctx: &mut impl SubsystemContext,
+async fn disconnect_inactive_peers<Context>(
+	ctx: &mut Context,
 	eviction_policy: &crate::CollatorEvictionPolicy,
 	peers: &HashMap<PeerId, PeerData>,
-) {
+)
+where
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext<Message=CollatorProtocolMessage>,
+{
 	for (peer, peer_data) in peers {
 		if peer_data.is_inactive(&eviction_policy) {
 			disconnect_peer(ctx, peer.clone()).await;
@@ -1248,7 +1279,8 @@ async fn poll_collation_response<Context>(
 )
 -> bool
 where
-	Context: SubsystemContext
+	Context: overseer::SubsystemContext<Message=CollatorProtocolMessage>,
+	Context: SubsystemContext,
 {
 	if never!(per_req.from_collator.is_terminated()) {
 		tracing::error!(
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests.rs
index fb49bdfa2c56f2a8ca7f5cd6499c4072c1198356..86076407c77680fa7f7cdd45cd209bd456dc2a8f 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests.rs
@@ -31,7 +31,7 @@ use polkadot_primitives::v1::{
 use polkadot_node_primitives::BlockData;
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_subsystem_testhelpers as test_helpers;
-use polkadot_subsystem::messages::{RuntimeApiMessage, RuntimeApiRequest};
+use polkadot_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest};
 use polkadot_node_network_protocol::{
 	our_view, ObservedRole, request_response::{Requests, ResponseSender},
 };
diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs
index adb9fa6e34f811d0b3261ac529f07e33879577e8..fc56e75febd7ffb5e82f9515f2a1bb2d01a94414 100644
--- a/polkadot/node/network/gossip-support/src/lib.rs
+++ b/polkadot/node/network/gossip-support/src/lib.rs
@@ -29,12 +29,16 @@ use futures::{channel::oneshot, FutureExt as _};
 use rand::{SeedableRng, seq::SliceRandom as _};
 use rand_chacha::ChaCha20Rng;
 use polkadot_node_subsystem::{
+	overseer,
+	SubsystemError,
+	FromOverseer, SpawnedSubsystem, SubsystemContext,
 	messages::{
-		AllMessages, GossipSupportMessage, NetworkBridgeMessage,
-		RuntimeApiMessage, RuntimeApiRequest,
+		GossipSupportMessage,
+		NetworkBridgeMessage,
+		RuntimeApiMessage,
+		RuntimeApiRequest,
 	},
-	ActiveLeavesUpdate, FromOverseer, OverseerSignal,
-	Subsystem, SpawnedSubsystem, SubsystemContext,
+	ActiveLeavesUpdate, OverseerSignal,
 };
 use polkadot_node_subsystem_util as util;
 use polkadot_primitives::v1::{
@@ -94,6 +98,7 @@ impl GossipSupport {
 	async fn run<Context>(self, ctx: Context)
 	where
 		Context: SubsystemContext<Message = GossipSupportMessage>,
+		Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
 	{
 		let mut state = State::default();
 		self.run_inner(ctx, &mut state).await;
@@ -102,6 +107,7 @@ impl GossipSupport {
 	async fn run_inner<Context>(self, mut ctx: Context, state: &mut State)
 	where
 		Context: SubsystemContext<Message = GossipSupportMessage>,
+		Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
 	{
 		let Self { keystore } = self;
 		loop {
@@ -138,10 +144,14 @@ impl GossipSupport {
 	}
 }
 
-async fn determine_relevant_authorities(
-	ctx: &mut impl SubsystemContext,
+async fn determine_relevant_authorities<Context>(
+	ctx: &mut Context,
 	relay_parent: Hash,
-) -> Result<Vec<AuthorityDiscoveryId>, util::Error> {
+) -> Result<Vec<AuthorityDiscoveryId>, util::Error>
+where
+	Context: SubsystemContext<Message = GossipSupportMessage>,
+	Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
+{
 	let authorities = util::request_authorities(relay_parent, ctx.sender()).await.await??;
 	tracing::debug!(
 		target: LOG_TARGET,
@@ -169,19 +179,23 @@ async fn ensure_i_am_an_authority(
 }
 
 /// A helper function for making a `ConnectToValidators` request.
-async fn connect_to_authorities(
-	ctx: &mut impl SubsystemContext,
+async fn connect_to_authorities<Context>(
+	ctx: &mut Context,
 	validator_ids: Vec<AuthorityDiscoveryId>,
 	peer_set: PeerSet,
-) -> oneshot::Receiver<usize> {
+) -> oneshot::Receiver<usize>
+where
+	Context: SubsystemContext<Message = GossipSupportMessage>,
+	Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
+{
 	let (failed, failed_rx) = oneshot::channel();
-	ctx.send_message(AllMessages::NetworkBridge(
+	ctx.send_message(
 		NetworkBridgeMessage::ConnectToValidators {
 			validator_ids,
 			peer_set,
 			failed,
 		}
-	)).await;
+	).await;
 	failed_rx
 }
 
@@ -193,12 +207,16 @@ async fn connect_to_authorities(
 /// This limits the amount of gossip peers to 2 * sqrt(len) and ensures the diameter of 2.
 ///
 /// [web3]: https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology
-async fn update_gossip_topology(
-	ctx: &mut impl SubsystemContext,
+async fn update_gossip_topology<Context>(
+	ctx: &mut Context,
 	our_index: usize,
 	authorities: Vec<AuthorityDiscoveryId>,
 	relay_parent: Hash,
-) -> Result<(), util::Error> {
+) -> Result<(), util::Error>
+where
+	Context: SubsystemContext<Message = GossipSupportMessage>,
+	Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
+{
 	// retrieve BABE randomness
 	let random_seed = {
 		let (tx, rx) = oneshot::channel();
@@ -206,7 +224,7 @@ async fn update_gossip_topology(
 		ctx.send_message(RuntimeApiMessage::Request(
 			relay_parent,
 			RuntimeApiRequest::CurrentBabeEpoch(tx),
-		).into()).await;
+		)).await;
 
 		let randomness = rx.await??.randomness;
 		let mut subject = [0u8; 40];
@@ -227,11 +245,11 @@ async fn update_gossip_topology(
 	let neighbors = matrix_neighbors(our_shuffled_position, len);
 	let our_neighbors = neighbors.map(|i| authorities[indices[i]].clone()).collect();
 
-	ctx.send_message(AllMessages::NetworkBridge(
+	ctx.send_message(
 		NetworkBridgeMessage::NewGossipTopology {
 			our_neighbors,
 		}
-	)).await;
+	).await;
 
 	Ok(())
 }
@@ -262,12 +280,16 @@ impl State {
 	/// 1. Determine if the current session index has changed.
 	/// 2. If it has, determine relevant validators
 	///    and issue a connection request.
-	async fn handle_active_leaves(
+	async fn handle_active_leaves<Context>(
 		&mut self,
-		ctx: &mut impl SubsystemContext,
+		ctx: &mut Context,
 		keystore: &SyncCryptoStorePtr,
 		leaves: impl Iterator<Item = Hash>,
-	) -> Result<(), util::Error> {
+	) -> Result<(), util::Error>
+	where
+		Context: SubsystemContext<Message = GossipSupportMessage>,
+		Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
+	{
 		for leaf in leaves {
 			let current_index = util::request_session_index_for_child(leaf, ctx.sender()).await.await??;
 			let since_failure = self.last_failure.map(|i| i.elapsed()).unwrap_or_default();
@@ -310,11 +332,15 @@ impl State {
 		Ok(())
 	}
 
-	async fn issue_connection_request(
+	async fn issue_connection_request<Context>(
 		&mut self,
-		ctx: &mut impl SubsystemContext,
+		ctx: &mut Context,
 		authorities: Vec<AuthorityDiscoveryId>,
-	) -> Result<(), util::Error> {
+	) -> Result<(), util::Error>
+	where
+		Context: SubsystemContext<Message = GossipSupportMessage>,
+		Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
+	{
 		let num = authorities.len();
 		tracing::debug!(target: LOG_TARGET, %num, "Issuing a connection request");
 
@@ -362,9 +388,10 @@ impl State {
 	}
 }
 
-impl<Context> Subsystem<Context> for GossipSupport
+impl<Context> overseer::Subsystem<Context, SubsystemError> for GossipSupport
 where
-	Context: SubsystemContext<Message = GossipSupportMessage> + Sync + Send,
+	Context: SubsystemContext<Message = GossipSupportMessage>,
+	Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let future = self.run(ctx)
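
The `update_gossip_topology` changes above keep the documented scheme: shuffle the authorities with BABE randomness, lay them out in a roughly sqrt(len)-wide matrix, and connect each node to its row and column, which yields on the order of 2 * sqrt(len) gossip peers and a diameter of 2. A free-standing, index-only sketch of that neighbour selection (`matrix_neighbors_sketch` is a simplified stand-in for the helper used above):

	/// Row and column neighbours of `our_index` in a `ceil(sqrt(len))`-wide matrix layout.
	fn matrix_neighbors_sketch(our_index: usize, len: usize) -> Vec<usize> {
		assert!(our_index < len);
		let width = (len as f64).sqrt().ceil() as usize;
		let (our_row, our_col) = (our_index / width, our_index % width);
		(0..len)
			.filter(|&i| i != our_index)
			.filter(|&i| i / width == our_row || i % width == our_col)
			.collect()
	}

	// With 16 shuffled authorities, index 5 (row 1, column 1) connects to
	// {4, 6, 7} in its row and {1, 9, 13} in its column: 6 peers, i.e. 2 * sqrt(16) - 2.
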
diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs
index 7f96f4caf7ac0f943b66e3ea960e2fcdefbb6fbf..8d80d84d9b0b9e3464d464f7e112603e71c8deae 100644
--- a/polkadot/node/network/gossip-support/src/tests.rs
+++ b/polkadot/node/network/gossip-support/src/tests.rs
@@ -19,7 +19,7 @@
 use super::*;
 use polkadot_node_subsystem::{
 	jaeger, ActivatedLeaf, LeafStatus,
-	messages::{RuntimeApiMessage, RuntimeApiRequest},
+	messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest},
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt as _;
diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs
index acd24f7156bfd00aa5fc133311488db426fb85a1..058c2dbab14fc2f3f5f77dfc460f844da21c48c2 100644
--- a/polkadot/node/network/protocol/src/lib.rs
+++ b/polkadot/node/network/protocol/src/lib.rs
@@ -87,13 +87,15 @@ impl Into<sc_network::ObservedRole> for ObservedRole {
 	}
 }
 
+/// Implement `TryFrom<$m_ty>` so the inner type of a single enum variant can be extracted.
+/// `$m_ty::$variant(inner) -> Ok(inner)`
 macro_rules! impl_try_from {
 	($m_ty:ident, $variant:ident, $out:ty) => {
 		impl TryFrom<$m_ty> for $out {
 			type Error = crate::WrongVariant;
 
-			#[allow(unreachable_patterns)] // when there is only one variant
 			fn try_from(x: $m_ty) -> Result<$out, Self::Error> {
+				#[allow(unreachable_patterns)] // when there is only one variant
 				match x {
 					$m_ty::$variant(y) => Ok(y),
 					_ => Err(crate::WrongVariant),
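
For reference, each invocation of the macro documented above expands to an ordinary `TryFrom` impl. A self-contained sketch of what `impl_try_from!(Msg, Bitfield, BitfieldPayload)` would produce, using illustrative stand-in types rather than the real protocol enums:

	use std::convert::TryFrom;

	struct WrongVariant;
	struct BitfieldPayload;
	enum Msg { Bitfield(BitfieldPayload), Other }

	impl TryFrom<Msg> for BitfieldPayload {
		type Error = WrongVariant;

		fn try_from(x: Msg) -> Result<BitfieldPayload, Self::Error> {
			#[allow(unreachable_patterns)] // harmless when the enum has a single variant
			match x {
				Msg::Bitfield(y) => Ok(y),
				_ => Err(WrongVariant),
			}
		}
	}
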
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index 59c9cf249ba05fb683908e99e80c4ef1f6703747..6588d996ce2215a4737a164bcc7fe403fa6f7ab6 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -26,7 +26,8 @@ use error::{FatalResult, NonFatalResult, log_error};
 use parity_scale_codec::Encode;
 
 use polkadot_subsystem::{
-	ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan, SpawnedSubsystem, Subsystem,
+	overseer,
+	ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan, SpawnedSubsystem,
 	SubsystemContext, SubsystemError, jaeger,
 	messages::{
 		AllMessages, NetworkBridgeMessage, StatementDistributionMessage,
@@ -107,10 +108,12 @@ pub struct StatementDistribution {
 	metrics: Metrics,
 }
 
-impl<C> Subsystem<C> for StatementDistribution
-	where C: SubsystemContext<Message=StatementDistributionMessage>
+impl<Context> overseer::Subsystem<Context, SubsystemError> for StatementDistribution
+where
+	Context: SubsystemContext<Message=StatementDistributionMessage>,
+	Context: overseer::SubsystemContext<Message=StatementDistributionMessage>,
 {
-	fn start(self, ctx: C) -> SpawnedSubsystem {
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		// Swallow error because failure is fatal to the node and we log with more precision
 		// within `run`.
 		SpawnedSubsystem {
@@ -588,7 +591,7 @@ enum Message {
 
 impl Message {
 	async fn receive(
-		ctx: &mut impl SubsystemContext<Message = StatementDistributionMessage>,
+		ctx: &mut (impl SubsystemContext<Message = StatementDistributionMessage> + overseer::SubsystemContext<Message = StatementDistributionMessage>),
 		from_requester: &mut mpsc::Receiver<RequesterMessage>,
 		from_responder: &mut mpsc::Receiver<ResponderMessage>,
 	) -> Message {
@@ -846,7 +849,7 @@ async fn circulate_statement_and_dependents(
 	gossip_peers: &HashSet<PeerId>,
 	peers: &mut HashMap<PeerId, PeerData>,
 	active_heads: &mut HashMap<Hash, ActiveHeadData>,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	relay_parent: Hash,
 	statement: SignedFullStatement,
 	priority_peers: Vec<PeerId>,
@@ -953,7 +956,7 @@ fn is_statement_large(statement: &SignedFullStatement) -> bool {
 async fn circulate_statement<'a>(
 	gossip_peers: &HashSet<PeerId>,
 	peers: &mut HashMap<PeerId, PeerData>,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	relay_parent: Hash,
 	stored: StoredStatement<'a>,
 	mut priority_peers: Vec<PeerId>,
@@ -1034,7 +1037,7 @@ async fn circulate_statement<'a>(
 async fn send_statements_about(
 	peer: PeerId,
 	peer_data: &mut PeerData,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	relay_parent: Hash,
 	candidate_hash: CandidateHash,
 	active_head: &ActiveHeadData,
@@ -1071,7 +1074,7 @@ async fn send_statements_about(
 async fn send_statements(
 	peer: PeerId,
 	peer_data: &mut PeerData,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	relay_parent: Hash,
 	active_head: &ActiveHeadData,
 	metrics: &Metrics,
@@ -1103,7 +1106,7 @@ async fn send_statements(
 }
 
 async fn report_peer(
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	peer: PeerId,
 	rep: Rep,
 ) {
@@ -1123,7 +1126,7 @@ async fn retrieve_statement_from_message<'a>(
 	peer: PeerId,
 	message: protocol_v1::StatementDistributionMessage,
 	active_head: &'a mut ActiveHeadData,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	req_sender: &mpsc::Sender<RequesterMessage>,
 	metrics: &Metrics,
 ) -> Option<UncheckedSignedFullStatement> {
@@ -1225,7 +1228,7 @@ async fn launch_request(
 	meta: StatementMetadata,
 	peer: PeerId,
 	req_sender: mpsc::Sender<RequesterMessage>,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	metrics: &Metrics,
 ) -> Option<LargeStatementStatus> {
 
@@ -1263,7 +1266,7 @@ async fn handle_incoming_message_and_circulate<'a>(
 	gossip_peers: &HashSet<PeerId>,
 	peers: &mut HashMap<PeerId, PeerData>,
 	active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	message: protocol_v1::StatementDistributionMessage,
 	req_sender: &mpsc::Sender<RequesterMessage>,
 	metrics: &Metrics,
@@ -1312,7 +1315,7 @@ async fn handle_incoming_message<'a>(
 	peer: PeerId,
 	peer_data: &mut PeerData,
 	active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	message: protocol_v1::StatementDistributionMessage,
 	req_sender: &mpsc::Sender<RequesterMessage>,
 	metrics: &Metrics,
@@ -1447,10 +1450,7 @@ async fn handle_incoming_message<'a>(
 
 			// When we receive a new message from a peer, we forward it to the
 			// candidate backing subsystem.
-			let message = AllMessages::CandidateBacking(
-				CandidateBackingMessage::Statement(relay_parent, statement.statement.clone())
-			);
-			ctx.send_message(message).await;
+			ctx.send_message(CandidateBackingMessage::Statement(relay_parent, statement.statement.clone())).await;
 
 			Some((relay_parent, statement))
 		}
@@ -1462,7 +1462,7 @@ async fn update_peer_view_and_maybe_send_unlocked(
 	peer: PeerId,
 	gossip_peers: &HashSet<PeerId>,
 	peer_data: &mut PeerData,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	active_heads: &HashMap<Hash, ActiveHeadData>,
 	new_view: View,
 	metrics: &Metrics,
@@ -1506,7 +1506,7 @@ async fn handle_network_update(
 	gossip_peers: &mut HashSet<PeerId>,
 	authorities: &mut HashMap<AuthorityDiscoveryId, PeerId>,
 	active_heads: &mut HashMap<Hash, ActiveHeadData>,
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	req_sender: &mpsc::Sender<RequesterMessage>,
 	update: NetworkBridgeEvent<protocol_v1::StatementDistributionMessage>,
 	metrics: &Metrics,
@@ -1599,7 +1599,7 @@ async fn handle_network_update(
 impl StatementDistribution {
 	async fn run(
 		self,
-		mut ctx: impl SubsystemContext<Message = StatementDistributionMessage>,
+		mut ctx: (impl SubsystemContext<Message = StatementDistributionMessage> + overseer::SubsystemContext<Message = StatementDistributionMessage>),
 	) -> std::result::Result<(), Fatal> {
 		let mut peers: HashMap<PeerId, PeerData> = HashMap::new();
 		let mut gossip_peers: HashSet<PeerId> = HashSet::new();
@@ -1831,7 +1831,7 @@ impl StatementDistribution {
 
 	async fn handle_subsystem_message(
 		&self,
-		ctx: &mut impl SubsystemContext,
+		ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 		runtime: &mut RuntimeInfo,
 		peers: &mut HashMap<PeerId, PeerData>,
 		gossip_peers: &mut HashSet<PeerId>,
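
This file keeps `impl Trait` in argument position and simply adds the second context bound instead of introducing a named `Context` parameter; behind a `&mut` the combined bound has to be parenthesised, which is why every signature above reads `&mut (impl SubsystemContext + overseer::SubsystemContext)`. A signature-only sketch (the function name is hypothetical):

	async fn uses_both_context_traits(
		_ctx: &mut (impl SubsystemContext<Message = StatementDistributionMessage>
			+ overseer::SubsystemContext<Message = StatementDistributionMessage>),
	) {
		// Without the parentheses, `&mut impl A + B` is rejected by the parser
		// (`+` is ambiguous in reference types), hence the style used above.
	}
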
diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml
index 0d83f5da41506a077e4d37639a9e183329ade5c6..76f96e0a4182608ce4c10fabb79d9470671839b3 100644
--- a/polkadot/node/overseer/Cargo.toml
+++ b/polkadot/node/overseer/Cargo.toml
@@ -10,17 +10,21 @@ client = { package = "sc-client-api", git = "https://github.com/paritytech/subst
 sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
 futures = "0.3.15"
 futures-timer = "3.0.2"
-polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../primitives" }
-polkadot-node-subsystem-util = { path = "../subsystem-util" }
-polkadot-procmacro-overseer-subsystems-gen = { path = "./subsystems-gen" }
+polkadot-node-network-protocol = { path = "../network/protocol" }
+polkadot-node-primitives = { path = "../primitives" }
+polkadot-node-subsystem-types = { path = "../subsystem-types" }
+polkadot-node-metrics = { path = "../metrics" }
 polkadot-primitives = { path = "../../primitives" }
-polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../subsystem" }
+polkadot-overseer-gen = { path = "./overseer-gen" }
+polkadot-overseer-all-subsystems-gen = { path = "./all-subsystems-gen" }
 tracing = "0.1.26"
 lru = "0.6"
 
 [dev-dependencies]
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
 polkadot-node-network-protocol = { path = "../network/protocol" }
+polkadot-node-metrics = { path = "../metrics" }
+metered-channel = { path = "../metered-channel" }
 futures = { version = "0.3.15", features = ["thread-pool"] }
 femme = "2.1.1"
 kv-log-macro = "1.0.7"
diff --git a/polkadot/node/overseer/subsystems-gen/Cargo.toml b/polkadot/node/overseer/all-subsystems-gen/Cargo.toml
similarity index 88%
rename from polkadot/node/overseer/subsystems-gen/Cargo.toml
rename to polkadot/node/overseer/all-subsystems-gen/Cargo.toml
index 9d386083e89a54f5f6f8697402c63b90448f7a70..957a086cf2f3bbe88f162ab23929ccb92a19e16e 100644
--- a/polkadot/node/overseer/subsystems-gen/Cargo.toml
+++ b/polkadot/node/overseer/all-subsystems-gen/Cargo.toml
@@ -1,5 +1,5 @@
 [package]
-name = "polkadot-procmacro-overseer-subsystems-gen"
+name = "polkadot-overseer-all-subsystems-gen"
 version = "0.1.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
diff --git a/polkadot/node/overseer/subsystems-gen/src/lib.rs b/polkadot/node/overseer/all-subsystems-gen/src/lib.rs
similarity index 100%
rename from polkadot/node/overseer/subsystems-gen/src/lib.rs
rename to polkadot/node/overseer/all-subsystems-gen/src/lib.rs
diff --git a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-enum.rs b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-enum.rs
similarity index 70%
rename from polkadot/node/overseer/subsystems-gen/tests/ui/err-01-enum.rs
rename to polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-enum.rs
index 318636279ea5ba5323affed9792083a3a7790ad6..ffcbecd0b3f454c8942e855eb25f034dd36b914b 100644
--- a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-enum.rs
+++ b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-enum.rs
@@ -1,6 +1,6 @@
 #![allow(dead_code)]
 
-use polkadot_procmacro_overseer_subsystems_gen::AllSubsystemsGen;
+use polkadot_overseer_all_subsystems_gen::AllSubsystemsGen;
 
 #[derive(Clone, AllSubsystemsGen)]
 enum AllSubsystems<A,B> {
diff --git a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-enum.stderr b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-enum.stderr
similarity index 100%
rename from polkadot/node/overseer/subsystems-gen/tests/ui/err-01-enum.stderr
rename to polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-enum.stderr
diff --git a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-generic-used-twice.rs b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-generic-used-twice.rs
similarity index 75%
rename from polkadot/node/overseer/subsystems-gen/tests/ui/err-01-generic-used-twice.rs
rename to polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-generic-used-twice.rs
index f89939d5c306edffddca742d530ddc6207f006af..5c80dca787eab219f1e7fbbd5f2fa205de382b78 100644
--- a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-generic-used-twice.rs
+++ b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-generic-used-twice.rs
@@ -1,7 +1,6 @@
 #![allow(dead_code)]
 
-use polkadot_procmacro_overseer_subsystems_gen::AllSubsystemsGen;
-
+use polkadot_overseer_all_subsystems_gen::AllSubsystemsGen;
 #[derive(Clone, AllSubsystemsGen)]
 struct AllSubsystems<X> {
 	a: X,
diff --git a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-generic-used-twice.stderr b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-generic-used-twice.stderr
similarity index 67%
rename from polkadot/node/overseer/subsystems-gen/tests/ui/err-01-generic-used-twice.stderr
rename to polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-generic-used-twice.stderr
index 23e1404ff822c7ee9c5419300a27fe2130e2e804..019dc42aae0075480c24076c3d7c8c6d1721cd92 100644
--- a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-generic-used-twice.stderr
+++ b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-generic-used-twice.stderr
@@ -1,14 +1,14 @@
 error: Generic type parameters may only be used for exactly one field, but is used more than once.
- --> $DIR/err-01-generic-used-twice.rs:7:5
+ --> $DIR/err-01-generic-used-twice.rs:6:5
   |
-7 |     a: X,
+6 |     a: X,
   |        ^
 
 error[E0599]: no method named `replace_a` found for struct `AllSubsystems<u16>` in the current scope
-  --> $DIR/err-01-generic-used-twice.rs:16:17
+  --> $DIR/err-01-generic-used-twice.rs:15:17
    |
-6  | struct AllSubsystems<X> {
+5  | struct AllSubsystems<X> {
    | ----------------------- method `replace_a` not found for this
 ...
-16 |     let _all = all.replace_a(77u8);
+15 |     let _all = all.replace_a(77u8);
    |                    ^^^^^^^^^ method not found in `AllSubsystems<u16>`
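
The `.stderr` edits above are mechanical: removing the blank line after the `use` in the renamed `err-01-generic-used-twice.rs` shifts every later source line up by one, and these files are the expected compiler output the UI tests compare against. A sketch of the trybuild harness such cases are typically driven by (the test-file name is hypothetical):

	// tests/ui.rs (sketch): recompile each case and diff rustc's output
	// against the checked-in `.stderr` files.
	#[test]
	fn ui_cases() {
		let t = trybuild::TestCases::new();
		t.pass("tests/ui/ok-*.rs");
		t.compile_fail("tests/ui/err-*.rs");
	}
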
diff --git a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-no-generic.rs b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-no-generic.rs
similarity index 75%
rename from polkadot/node/overseer/subsystems-gen/tests/ui/err-01-no-generic.rs
rename to polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-no-generic.rs
index 0466eb444cd962d97f0dcc326526d06366a04e7a..2a231295489084d247ba9582288a0101e23ada55 100644
--- a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-no-generic.rs
+++ b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-no-generic.rs
@@ -1,6 +1,6 @@
 #![allow(dead_code)]
 
-use polkadot_procmacro_overseer_subsystems_gen::AllSubsystemsGen;
+use polkadot_overseer_all_subsystems_gen::AllSubsystemsGen;
 
 #[derive(Clone, AllSubsystemsGen)]
 struct AllSubsystems {
diff --git a/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-no-generic.stderr b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-no-generic.stderr
new file mode 100644
index 0000000000000000000000000000000000000000..f8eb8e56c3acd7151bbf26c0fbf3eaf1d43ce3a5
--- /dev/null
+++ b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-no-generic.stderr
@@ -0,0 +1,14 @@
+error: struct must have at least one generic parameter.
+ --> $DIR/err-01-no-generic.rs:6:8
+  |
+6 | struct AllSubsystems {
+  |        ^^^^^^^^^^^^^
+
+error[E0599]: no method named `replace_a` found for struct `AllSubsystems` in the current scope
+  --> $DIR/err-01-no-generic.rs:16:17
+   |
+6  | struct AllSubsystems {
+   | -------------------- method `replace_a` not found for this
+...
+16 |     let _all = all.replace_a(77u8);
+   |                    ^^^^^^^^^ method not found in `AllSubsystems`
diff --git a/polkadot/node/overseer/subsystems-gen/tests/ui/err-01-no-generics.stderr b/polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-no-generics.stderr
similarity index 100%
rename from polkadot/node/overseer/subsystems-gen/tests/ui/err-01-no-generics.stderr
rename to polkadot/node/overseer/all-subsystems-gen/tests/ui/err-01-no-generics.stderr
diff --git a/polkadot/node/overseer/all-subsystems-gen/tests/ui/ok-01-w-generics.rs b/polkadot/node/overseer/all-subsystems-gen/tests/ui/ok-01-w-generics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..370c4a872e25374fdd86c3e7eae52a0225c57a4e
--- /dev/null
+++ b/polkadot/node/overseer/all-subsystems-gen/tests/ui/ok-01-w-generics.rs
@@ -0,0 +1,17 @@
+#![allow(dead_code)]
+
+use polkadot_overseer_all_subsystems_gen::AllSubsystemsGen;
+
+#[derive(Clone, AllSubsystemsGen)]
+struct AllSubsystems<A, B> {
+	a: A,
+	b: B,
+}
+
+fn main() {
+	let all = AllSubsystems::<u8, u16> {
+		a: 0u8,
+		b: 1u16,
+	};
+	let _all: AllSubsystems<_,_> = all.replace_a::<u32>(777_777u32);
+}
diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs
index bd3170fa13673b55066b7bffb4eb29ac56a68818..71a703f9933649c084473953136cadbb9f7429b8 100644
--- a/polkadot/node/overseer/examples/minimal-example.rs
+++ b/polkadot/node/overseer/examples/minimal-example.rs
@@ -28,11 +28,21 @@ use futures_timer::Delay;
 
 use polkadot_node_primitives::{PoV, BlockData};
 use polkadot_primitives::v1::Hash;
-use polkadot_overseer::{Overseer, HeadSupportsParachains, AllSubsystems};
-
-use polkadot_subsystem::{Subsystem, SubsystemContext, SpawnedSubsystem, FromOverseer};
-use polkadot_subsystem::messages::{
-	CandidateValidationMessage, CandidateBackingMessage, AllMessages,
+use polkadot_overseer::{
+	self as overseer,
+	AllMessages,
+	AllSubsystems,
+	HeadSupportsParachains,
+	Overseer,
+	OverseerSignal,
+	SubsystemError,
+	gen::{
+		FromOverseer,
+		SpawnedSubsystem,
+	},
+};
+use polkadot_node_subsystem_types::messages::{
+	CandidateValidationMessage, CandidateBackingMessage,
 };
 
 struct AlwaysSupportsParachains;
@@ -40,45 +50,53 @@ impl HeadSupportsParachains for AlwaysSupportsParachains {
 	fn head_supports_parachains(&self, _head: &Hash) -> bool { true }
 }
 
+
+// Subsystem 1: receives `CandidateBackingMessage`s and fires off candidate-validation requests.
+
 struct Subsystem1;
 
 impl Subsystem1 {
-	async fn run(mut ctx: impl SubsystemContext<Message=CandidateBackingMessage>)  {
-		loop {
+	async fn run<Ctx>(mut ctx: Ctx) -> ()
+	where
+		Ctx: overseer::SubsystemContext<Message=CandidateBackingMessage,AllMessages=AllMessages,Signal=OverseerSignal>,
+	{
+		'event_loop: loop {
 			match ctx.try_recv().await {
 				Ok(Some(msg)) => {
 					if let FromOverseer::Communication { msg } = msg {
 						tracing::info!("msg {:?}", msg);
 					}
-					continue;
+					continue 'event_loop;
 				}
 				Ok(None) => (),
 				Err(_) => {
 					tracing::info!("exiting");
-					return;
+					break 'event_loop;
 				}
 			}
 
 			Delay::new(Duration::from_secs(1)).await;
 			let (tx, _) = oneshot::channel();
 
-			ctx.send_message(AllMessages::CandidateValidation(
-				CandidateValidationMessage::ValidateFromChainState(
-					Default::default(),
-					PoV {
-						block_data: BlockData(Vec::new()),
-					}.into(),
-					tx,
-				)
-			)).await;
+			let msg = CandidateValidationMessage::ValidateFromChainState(
+				Default::default(),
+				PoV {
+					block_data: BlockData(Vec::new()),
+				}.into(),
+				tx,
+			);
+			ctx.send_message(<Ctx as overseer::SubsystemContext>::AllMessages::from(msg)).await;
 		}
+		()
 	}
 }
 
-impl<C> Subsystem<C> for Subsystem1
-	where C: SubsystemContext<Message=CandidateBackingMessage>
+
+impl<Context> overseer::Subsystem<Context,SubsystemError> for Subsystem1
+where
+	Context: overseer::SubsystemContext<Message=CandidateBackingMessage,AllMessages=AllMessages,Signal=OverseerSignal>,
 {
-	fn start(self, ctx: C) -> SpawnedSubsystem {
+	fn start(self, ctx: Context) -> SpawnedSubsystem<SubsystemError> {
 		let future = Box::pin(async move {
 			Self::run(ctx).await;
 			Ok(())
@@ -91,10 +109,15 @@ impl<C> Subsystem<C> for Subsystem1
 	}
 }
 
+//////////////////
+
 struct Subsystem2;
 
 impl Subsystem2 {
-	async fn run(mut ctx: impl SubsystemContext<Message=CandidateValidationMessage>)  {
+	async fn run<Ctx>(mut ctx: Ctx)
+	where
+		Ctx: overseer::SubsystemContext<Message=CandidateValidationMessage,AllMessages=AllMessages,Signal=OverseerSignal>,
+	{
 		ctx.spawn(
 			"subsystem-2-job",
 			Box::pin(async {
@@ -121,10 +144,11 @@ impl Subsystem2 {
 	}
 }
 
-impl<C> Subsystem<C> for Subsystem2
-	where C: SubsystemContext<Message=CandidateValidationMessage>
+impl<Context> overseer::Subsystem<Context,SubsystemError> for Subsystem2
+where
+	Context: overseer::SubsystemContext<Message=CandidateValidationMessage,AllMessages=AllMessages,Signal=OverseerSignal>,
 {
-	fn start(self, ctx: C) -> SpawnedSubsystem {
+	fn start(self, ctx: Context) -> SpawnedSubsystem<SubsystemError> {
 		let future = Box::pin(async move {
 			Self::run(ctx).await;
 			Ok(())
@@ -147,7 +171,9 @@ fn main() {
 
 		let all_subsystems = AllSubsystems::<()>::dummy()
 			.replace_candidate_validation(Subsystem2)
-			.replace_candidate_backing(Subsystem1);
+			.replace_candidate_backing(Subsystem1)
+			;
+
 		let (overseer, _handler) = Overseer::new(
 			vec![],
 			all_subsystems,
diff --git a/polkadot/node/overseer/overseer-gen/Cargo.toml b/polkadot/node/overseer/overseer-gen/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..bb7f4836c6f1568d38c6c3e5de9f394fd3289a1a
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "polkadot-overseer-gen"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+description = "Generate an overseer including builder pattern and message wrapper from a single struct."
+
+[dependencies]
+tracing = "0.1"
+futures = "0.3"
+async-trait = "0.1"
+thiserror = "1"
+metered = { package = "metered-channel", path = "../../metered-channel" }
+polkadot-overseer-gen-proc-macro = { path = "./proc-macro" }
+polkadot-node-network-protocol = { path = "../../network/protocol"}
+# trait SpawnNamed
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+futures-timer = "3.0.2"
+pin-project = "1.0"
+
+[dev-dependencies]
+trybuild = "1.0.41"
diff --git a/polkadot/node/overseer/overseer-gen/examples/dummy.rs b/polkadot/node/overseer/overseer-gen/examples/dummy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..401e70e89f2d2054f218e3ef26e58d0c1d630124
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/examples/dummy.rs
@@ -0,0 +1,134 @@
+//! A dummy to be used with cargo expand
+
+use polkadot_overseer_gen::*;
+use polkadot_node_network_protocol::WrongVariant;
+
+
+/// Concrete subsystem implementation for `MsgStrukt` msg type.
+#[derive(Default)]
+pub struct AwesomeSubSys;
+
+impl ::polkadot_overseer_gen::Subsystem<XxxSubsystemContext<MsgStrukt>, Yikes> for  AwesomeSubSys {
+	fn start(self, _ctx: XxxSubsystemContext<MsgStrukt>) -> SpawnedSubsystem < Yikes > {
+		unimplemented!("starting yay!")
+	}
+}
+
+#[derive(Default)]
+pub struct GoblinTower;
+
+impl ::polkadot_overseer_gen::Subsystem<XxxSubsystemContext<Plinko>, Yikes> for GoblinTower {
+	fn start(self, _ctx: XxxSubsystemContext<Plinko>) -> SpawnedSubsystem < Yikes > {
+		unimplemented!("welcum")
+	}
+}
+
+
+/// A signal sent by the overseer.
+#[derive(Debug, Clone)]
+pub struct SigSigSig;
+
+
+/// The external event.
+#[derive(Debug, Clone)]
+pub struct EvX;
+
+
+impl EvX {
+	pub fn focus<'a, T>(&'a self) -> Result<EvX, ()> {
+		unimplemented!("dispatch")
+	}
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct Yikes;
+
+impl std::fmt::Display for Yikes {
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		writeln!(f, "yikes!")
+	}
+}
+
+impl std::error::Error for Yikes {}
+
+impl From<polkadot_overseer_gen::OverseerError> for Yikes {
+	fn from(_: polkadot_overseer_gen::OverseerError) -> Yikes {
+		Yikes
+	}
+}
+
+impl From<polkadot_overseer_gen::mpsc::SendError> for Yikes {
+	fn from(_: polkadot_overseer_gen::mpsc::SendError) -> Yikes {
+		Yikes
+	}
+}
+
+#[derive(Debug, Clone)]
+pub struct MsgStrukt(u8);
+
+#[derive(Debug, Clone, Copy)]
+pub struct Plinko;
+
+impl From<NetworkMsg> for MsgStrukt {
+	fn from(_event: NetworkMsg) -> Self {
+		MsgStrukt(1u8)
+	}
+}
+
+
+#[derive(Debug, Clone, Copy)]
+pub enum NetworkMsg {
+	A,
+	B,
+	C,
+}
+
+
+impl NetworkMsg {
+	fn focus(&self) -> Result<Self, WrongVariant> {
+		Ok(match self {
+			Self::B => return Err(WrongVariant),
+			Self::A | Self::C => self.clone()
+		})
+	}
+}
+
+
+
+#[overlord(signal=SigSigSig, event=EvX, error=Yikes, network=NetworkMsg, gen=AllMessages)]
+struct Xxx {
+	#[subsystem(MsgStrukt)]
+	sub0: AwesomeSubSys,
+
+	#[subsystem(no_dispatch, blocking, Plinko)]
+	plinkos: GoblinTower,
+
+	i_like_pi: f64,
+}
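+
+// Note: for this struct the `#[overlord(..)]` attribute roughly generates
+// (see the `impl_*` modules of the proc-macro crate for the exact expansion):
+//  * an `AllMessages` wrapper enum with `From` impls for `MsgStrukt` and `Plinko`,
+//  * an `XxxBuilder` with one setter per field plus `spawner(..)` and `build()`,
+//  * the `XxxSubsystemContext<M>` type (used above) and `XxxSubsystemSender`.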
+
+#[derive(Debug, Clone)]
+struct DummySpawner;
+
+impl SpawnNamed for DummySpawner{
+	fn spawn_blocking(&self, name: &'static str, _future: futures::future::BoxFuture<'static, ()>) {
+		unimplemented!("spawn blocking {}", name)
+	}
+
+	fn spawn(&self, name: &'static str, _future: futures::future::BoxFuture<'static, ()>) {
+		unimplemented!("spawn {}", name)
+	}
+}
+
+#[derive(Debug, Clone)]
+struct DummyCtx;
+
+fn main() {
+	let (overseer, _handler): (Xxx<_>, _) = Xxx::builder()
+		.sub0(AwesomeSubSys::default())
+		.plinkos(GoblinTower::default())
+		.i_like_pi(::std::f64::consts::PI)
+		.spawner(DummySpawner)
+		.build()
+		.unwrap();
+	assert_eq!(overseer.i_like_pi.floor() as i8, 3);
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/Cargo.toml b/polkadot/node/overseer/overseer-gen/proc-macro/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..ba8b32f96825169d8c984643ab668534b78094bd
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "polkadot-overseer-gen-proc-macro"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+description = "Generate an overseer including builder pattern and message wrapper from a single annotated struct definition."
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[lib]
+proc-macro = true
+
+[dependencies]
+syn = { version = "1.0.60", features = ["full", "extra-traits"] }
+quote = "1.0.9"
+proc-macro2 = "1.0.26"
+proc-macro-crate = "1.0.0"
+
+[dev-dependencies]
+assert_matches = "1.5.0"
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_builder.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_builder.rs
new file mode 100644
index 0000000000000000000000000000000000000000..256288368b8e21d7877378191035ae80ba5c7061
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_builder.rs
@@ -0,0 +1,373 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use quote::quote;
+use syn::Ident;
+
+use super::*;
+
+/// Implement a builder pattern for the `Overseer`-type,
+/// which acts as the gateway to constructing the overseer.
+///
+/// Elements tagged with `wip` are not covered here.
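+///
+/// The generated builder provides one setter per subsystem and baggage field,
+/// a `spawner(..)` setter and a `build()` method that wires up all channels and
+/// returns the overseer together with its handle. For the example overseer `Xxx`
+/// in `polkadot-overseer-gen`'s `examples/dummy.rs` this looks roughly like:
+///
+/// ```ignore
+/// let (overseer, _handle) = Xxx::builder()
+/// 	.sub0(AwesomeSubSys::default())
+/// 	.plinkos(GoblinTower::default())
+/// 	.i_like_pi(::std::f64::consts::PI)
+/// 	.spawner(DummySpawner)
+/// 	.build()?;
+/// ```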
+pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
+	let overseer_name = info.overseer_name.clone();
+	let builder = Ident::new(&(overseer_name.to_string() + "Builder"), overseer_name.span());
+	let handle = Ident::new(&(overseer_name.to_string() + "Handle"), overseer_name.span());
+
+	let subsystem_name = &info.subsystem_names_without_wip();
+	let builder_generic_ty = &info.builder_generic_types();
+
+	let channel_name = &info.channel_names_without_wip("");
+	let channel_name_unbounded = &info.channel_names_without_wip("_unbounded");
+
+	let channel_name_tx = &info.channel_names_without_wip("_tx");
+	let channel_name_unbounded_tx = &info.channel_names_without_wip("_unbounded_tx");
+
+	let channel_name_rx = &info.channel_names_without_wip("_rx");
+	let channel_name_unbounded_rx = &info.channel_names_without_wip("_unbounded_rx");
+
+	let baggage_generic_ty = &info.baggage_generic_types();
+	let baggage_name = &info.baggage_names();
+	let baggage_ty = &info.baggage_types();
+
+	let error_ty = &info.extern_error_ty;
+
+	let support_crate = info.support_crate_name();
+
+	let blocking = &info
+		.subsystems()
+		.iter()
+		.map(|x| {
+			if x.blocking {
+				quote! { Blocking }
+			} else {
+				quote! { Regular }
+			}
+		})
+		.collect::<Vec<_>>();
+
+	let generics = quote! {
+		< S, #( #baggage_generic_ty, )* >
+	};
+	let where_clause = quote! {
+		where
+			S: #support_crate ::SpawnNamed,
+	};
+
+	let builder_generics = quote! {
+		<S, #( #baggage_generic_ty, )* #( #builder_generic_ty, )* >
+	};
+
+	// all subsystems must have the same context
+	// even if the overseer does not impose such a limit.
+	let builder_additional_generics = quote! {
+		<#( #builder_generic_ty, )* >
+	};
+
+	let consumes = &info.consumes();
+
+	let subsystem_ctx_name = Ident::new(
+		&(overseer_name.to_string() + "SubsystemContext"),
+		overseer_name.span()
+	);
+
+	let builder_where_clause = quote! {
+		where
+			S: #support_crate ::SpawnNamed,
+		#(
+			#builder_generic_ty : Subsystem<#subsystem_ctx_name< #consumes >, #error_ty>,
+		)*
+	};
+
+	let event = &info.extern_event_ty;
+
+	let mut ts = quote! {
+		impl #generics #overseer_name #generics #where_clause {
+			/// Create a new overseer utilizing the builder.
+			pub fn builder #builder_additional_generics () -> #builder #builder_generics
+				#builder_where_clause
+			{
+				#builder :: default()
+			}
+		}
+
+		/// Handle for an overseer.
+		pub type #handle = #support_crate ::metered::MeteredSender< #event >;
+
+		#[allow(missing_docs)]
+		pub struct #builder #builder_generics {
+			#(
+				#subsystem_name : ::std::option::Option< #builder_generic_ty >,
+			)*
+			#(
+				#baggage_name : ::std::option::Option< #baggage_ty >,
+			)*
+			spawner: ::std::option::Option< S >,
+		}
+
+		impl #builder_generics Default for #builder #builder_generics {
+			fn default() -> Self {
+				// explicitly assure the required traits are implemented
+				fn trait_from_must_be_implemented<E>()
+				where
+					E: std::error::Error + Send + Sync + 'static + From<#support_crate ::OverseerError>
+				{}
+
+				trait_from_must_be_implemented::< #error_ty >();
+
+				Self {
+				#(
+					#subsystem_name: None,
+				)*
+				#(
+					#baggage_name: None,
+				)*
+					spawner: None,
+				}
+			}
+		}
+
+		impl #builder_generics #builder #builder_generics #builder_where_clause {
+			/// The spawner to use for spawning tasks.
+			pub fn spawner(mut self, spawner: S) -> Self
+			where
+				S: #support_crate ::SpawnNamed + Send
+			{
+				self.spawner = Some(spawner);
+				self
+			}
+
+			#(
+				/// Specify the particular subsystem implementation.
+				pub fn #subsystem_name (mut self, subsystem: #builder_generic_ty ) -> Self {
+					self. #subsystem_name = Some( subsystem );
+					self
+				}
+			)*
+
+			#(
+				/// Attach the user defined addendum type.
+				pub fn #baggage_name (mut self, baggage: #baggage_ty ) -> Self {
+					self. #baggage_name = Some( baggage );
+					self
+				}
+			)*
+
+			/// Complete the construction and create the overseer type.
+			pub fn build(mut self) -> ::std::result::Result<(#overseer_name #generics, #handle), #error_ty>
+			{
+				let (events_tx, events_rx) = #support_crate ::metered::channel::<
+					#event
+				>(SIGNAL_CHANNEL_CAPACITY);
+
+				let handle: #handle = events_tx.clone();
+
+				let (to_overseer_tx, to_overseer_rx) = #support_crate ::metered::unbounded::<
+					ToOverseer
+				>();
+
+				#(
+					let (#channel_name_tx, #channel_name_rx)
+					=
+						#support_crate ::metered::channel::<
+							MessagePacket< #consumes >
+						>(CHANNEL_CAPACITY);
+				)*
+
+				#(
+					let (#channel_name_unbounded_tx, #channel_name_unbounded_rx) =
+						#support_crate ::metered::unbounded::<
+							MessagePacket< #consumes >
+						>();
+				)*
+
+				let channels_out =
+					ChannelsOut {
+						#(
+							#channel_name: #channel_name_tx .clone(),
+						)*
+						#(
+							#channel_name_unbounded: #channel_name_unbounded_tx,
+						)*
+					};
+
+				let mut spawner = self.spawner.expect("Spawner is set. qed");
+
+				let mut running_subsystems = #support_crate ::FuturesUnordered::<
+						BoxFuture<'static, ::std::result::Result<(), #error_ty > >
+					>::new();
+
+				#(
+					// TODO generate a builder pattern that ensures this
+					// TODO https://github.com/paritytech/polkadot/issues/3427
+					let #subsystem_name = self. #subsystem_name .expect("All subsystems must be set when using the builder pattern.");
+
+					let unbounded_meter = #channel_name_unbounded_rx.meter().clone();
+
+					let message_rx: SubsystemIncomingMessages< #consumes > = #support_crate ::select(
+						#channel_name_rx, #channel_name_unbounded_rx
+					);
+					let (signal_tx, signal_rx) = #support_crate ::metered::channel(SIGNAL_CHANNEL_CAPACITY);
+					let ctx = #subsystem_ctx_name::< #consumes >::new(
+						signal_rx,
+						message_rx,
+						channels_out.clone(),
+						to_overseer_tx.clone(),
+					);
+
+					let #subsystem_name: OverseenSubsystem< #consumes > =
+						spawn::<_,_, #blocking, _, _, _>(
+							&mut spawner,
+							#channel_name_tx,
+							signal_tx,
+							unbounded_meter,
+							channels_out.clone(),
+							ctx,
+							#subsystem_name,
+							&mut running_subsystems,
+						)?;
+				)*
+
+				#(
+					let #baggage_name = self. #baggage_name .expect(
+						&format!("Baggage variable `{1}` of `{0}` ",
+							stringify!(#overseer_name),
+							stringify!( #baggage_name )
+						)
+					);
+				)*
+
+				use #support_crate ::StreamExt;
+
+				let to_overseer_rx = to_overseer_rx.fuse();
+				let overseer = #overseer_name {
+					#(
+						#subsystem_name,
+					)*
+
+					#(
+						#baggage_name,
+					)*
+
+					spawner,
+					running_subsystems,
+					events_rx,
+					to_overseer_rx,
+				};
+
+				Ok((overseer, handle))
+			}
+		}
+	};
+	ts.extend(impl_task_kind(info));
+	ts
+}
+
+pub(crate) fn impl_task_kind(info: &OverseerInfo) -> proc_macro2::TokenStream {
+	let signal = &info.extern_signal_ty;
+	let error_ty = &info.extern_error_ty;
+	let support_crate = info.support_crate_name();
+
+	let ts = quote! {
+
+		use #support_crate ::FutureExt as _;
+
+		/// Task kind to launch.
+		pub trait TaskKind {
+			/// Spawn a task, it depends on the implementer if this is blocking or not.
+			fn launch_task<S: SpawnNamed>(spawner: &mut S, name: &'static str, future: BoxFuture<'static, ()>);
+		}
+
+		#[allow(missing_docs)]
+		struct Regular;
+		impl TaskKind for Regular {
+			fn launch_task<S: SpawnNamed>(spawner: &mut S, name: &'static str, future: BoxFuture<'static, ()>) {
+				spawner.spawn(name, future)
+			}
+		}
+
+		#[allow(missing_docs)]
+		struct Blocking;
+		impl TaskKind for Blocking {
+			fn launch_task<S: SpawnNamed>(spawner: &mut S, name: &'static str, future: BoxFuture<'static, ()>) {
+				spawner.spawn_blocking(name, future)
+			}
+		}
+
+		/// Spawn a task of kind `TK` using the spawner `S`.
+		pub fn spawn<S, M, TK, Ctx, E, SubSys>(
+			spawner: &mut S,
+			message_tx: #support_crate ::metered::MeteredSender<MessagePacket<M>>,
+			signal_tx: #support_crate ::metered::MeteredSender< #signal >,
+			// meter for the unbounded channel
+			unbounded_meter: #support_crate ::metered::Meter,
+			// connection to the subsystems
+			channels_out: ChannelsOut,
+			ctx: Ctx,
+			s: SubSys,
+			futures: &mut #support_crate ::FuturesUnordered<BoxFuture<'static, ::std::result::Result<(), #error_ty> >>,
+		) -> ::std::result::Result<OverseenSubsystem<M>, #error_ty >
+		where
+			S: #support_crate ::SpawnNamed,
+			M: std::fmt::Debug + Send + 'static,
+			TK: TaskKind,
+			Ctx: #support_crate ::SubsystemContext<Message=M>,
+			E: std::error::Error + Send + Sync + 'static + From<#support_crate ::OverseerError>,
+			SubSys: #support_crate ::Subsystem<Ctx, E>,
+		{
+			let #support_crate ::SpawnedSubsystem::<E> { future, name } = s.start(ctx);
+
+			let (tx, rx) = #support_crate ::oneshot::channel();
+
+			let fut = Box::pin(async move {
+				if let Err(e) = future.await {
+					#support_crate ::tracing::error!(subsystem=name, err = ?e, "subsystem exited with error");
+				} else {
+					#support_crate ::tracing::debug!(subsystem=name, "subsystem exited without an error");
+				}
+				let _ = tx.send(());
+			});
+
+			<TK as TaskKind>::launch_task(spawner, name, fut);
+
+			futures.push(Box::pin(
+				rx.map(|e| {
+					tracing::warn!(err = ?e, "dropping error");
+					Ok(())
+				})
+			));
+
+			let instance = Some(SubsystemInstance {
+				meters: #support_crate ::SubsystemMeters {
+					unbounded: unbounded_meter,
+					bounded: message_tx.meter().clone(),
+					signals: signal_tx.meter().clone(),
+				},
+				tx_signal: signal_tx,
+				tx_bounded: message_tx,
+				signals_received: 0,
+				name,
+			});
+
+			Ok(OverseenSubsystem {
+				instance,
+			})
+		}
+	};
+
+	ts
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_channels_out.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_channels_out.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f2d6e88b360b3938d6ef6d8baec8d300d4c8778f
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_channels_out.rs
@@ -0,0 +1,128 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use quote::quote;
+use syn::Result;
+
+use super::*;
+
+/// Implement the helper type `ChannelsOut`, a collection of senders to all subsystems.
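+///
+/// The generated `send_and_log_error` and `send_unbounded_and_log_error`
+/// methods route a wrapper message to the bounded, respectively unbounded,
+/// channel of the subsystem consuming it and merely log on failure instead
+/// of returning an error.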
+pub(crate) fn impl_channels_out_struct(info: &OverseerInfo) -> Result<proc_macro2::TokenStream> {
+	let message_wrapper = info.message_wrapper.clone();
+
+	let channel_name = &info.channel_names_without_wip("");
+	let channel_name_unbounded = &info.channel_names_without_wip("_unbounded");
+
+	let consumes = &info.consumes_without_wip();
+
+	let consumes_variant = &info.variant_names_without_wip();
+	let unconsumes_variant = &info.variant_names_only_wip();
+
+	let support_crate = info.support_crate_name();
+
+	let ts = quote! {
+		/// Collection of channels to the individual subsystems.
+		///
+		/// Naming is from the point of view of the overseer.
+		#[derive(Debug, Clone)]
+		pub struct ChannelsOut {
+			#(
+				/// Bounded channel sender, connected to a subsystem.
+				pub #channel_name:
+					#support_crate ::metered::MeteredSender<
+						MessagePacket< #consumes >
+					>,
+			)*
+
+			#(
+				/// Unbounded channel sender, connected to a subsystem.
+				pub #channel_name_unbounded:
+					#support_crate ::metered::UnboundedMeteredSender<
+						MessagePacket< #consumes >
+					>,
+			)*
+		}
+
+		impl ChannelsOut {
+			/// Send a message via a bounded channel.
+			pub async fn send_and_log_error(
+				&mut self,
+				signals_received: usize,
+				message: #message_wrapper,
+			) {
+				let res: ::std::result::Result<_, _> = match message {
+				#(
+					#message_wrapper :: #consumes_variant ( inner ) => {
+						self. #channel_name .send(
+							#support_crate ::make_packet(signals_received, inner)
+						).await.map_err(|_| stringify!( #channel_name ))
+					}
+				)*
+					// subsystems that are wip
+				#(
+					#message_wrapper :: #unconsumes_variant ( _ ) => Ok(()),
+				)*
+					// dummy message type
+					#message_wrapper :: Empty => Ok(()),
+				};
+
+				if let Err(subsystem_name) = res {
+					#support_crate ::tracing::debug!(
+						target: LOG_TARGET,
+						"Failed to send (bounded) a message to {} subsystem",
+						subsystem_name
+					);
+				}
+			}
+
+			/// Send a message to another subsystem via an unbounded channel.
+			pub fn send_unbounded_and_log_error(
+				&self,
+				signals_received: usize,
+				message: #message_wrapper,
+			) {
+				use ::std::sync::mpsc::TrySendError;
+
+				let res: ::std::result::Result<_, _> = match message {
+				#(
+					#message_wrapper :: #consumes_variant (inner) => {
+						self. #channel_name_unbounded .unbounded_send(
+							#support_crate ::make_packet(signals_received, inner)
+						)
+						.map_err(|_| stringify!( #channel_name ))
+					},
+				)*
+					// subsystems that are wip
+				#(
+					#message_wrapper :: #unconsumes_variant ( _ ) => Ok(()),
+				)*
+					// dummy message type
+					#message_wrapper :: Empty => Ok(())
+				};
+
+				if let Err(subsystem_name) = res {
+					#support_crate ::tracing::debug!(
+						target: LOG_TARGET,
+						"Failed to send_unbounded a message to {} subsystem",
+						subsystem_name
+					);
+				}
+			}
+		}
+
+	};
+	Ok(ts)
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_dispatch.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_dispatch.rs
new file mode 100644
index 0000000000000000000000000000000000000000..687d094be53246fd6dc2c61e8ceb1d8c455422e5
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_dispatch.rs
@@ -0,0 +1,70 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use super::*;
+use proc_macro2::{TokenStream, Ident};
+use quote::quote;
+use syn::Path;
+
+pub(crate) fn impl_dispatch(info: &OverseerInfo) -> TokenStream {
+	let message_wrapper = &info.message_wrapper;
+
+	let dispatchable_variant = info
+		.subsystems()
+		.into_iter()
+		.filter(|ssf| !ssf.no_dispatch)
+		.filter(|ssf| !ssf.wip)
+		.map(|ssf| ssf.generic.clone())
+		.collect::<Vec<Ident>>();
+
+	let dispatchable_message = info
+		.subsystems()
+		.into_iter()
+		.filter(|ssf| !ssf.no_dispatch)
+		.filter(|ssf| !ssf.wip)
+		.map(|ssf| ssf.consumes.clone())
+		.collect::<Vec<Path>>();
+
+	let mut ts = TokenStream::new();
+	if let Some(extern_network_ty) = &info.extern_network_ty.clone() {
+		ts.extend(quote! {
+			impl #message_wrapper {
+				/// Create an iterator of wrapper messages to be dispatched for the given external message.
+				pub fn dispatch_iter(extern_msg: #extern_network_ty) -> impl Iterator<Item=Self> + Send {
+					::std::array::IntoIter::new([
+					#(
+						extern_msg
+							// focuses on a `NetworkBridgeEvent< protocol_v1::* >`
+							// TODO do not require this to be hardcoded, either externalize or ...
+							// https://github.com/paritytech/polkadot/issues/3427
+							.focus()
+							.ok()
+							.map(|event| {
+								#message_wrapper :: #dispatchable_variant (
+									// the inner type of the enum variant
+									#dispatchable_message :: from( event )
+								)
+							}),
+					)*
+					])
+					.into_iter()
+					.filter_map(|x: Option<_>| x)
+				}
+			}
+		});
+	}
+	ts
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_message_wrapper.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_message_wrapper.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b2cb7ce11f915d162080e80d7fda4f72eb2bce6b
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_message_wrapper.rs
@@ -0,0 +1,83 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use quote::quote;
+use syn::Result;
+use syn::spanned::Spanned;
+
+use super::*;
+
+/// Generates the wrapper type enum.
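+///
+/// For every consumed message type a variant and a `From` impl are generated;
+/// an optional variant for the `outgoing` type and a dummy `Empty` variant
+/// (constructed via `From<()>`) are added as well.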
+pub(crate) fn impl_message_wrapper_enum(info: &OverseerInfo) -> Result<proc_macro2::TokenStream> {
+	let consumes = info.consumes();
+	let consumes_variant = info.variant_names();
+
+	let outgoing = &info.outgoing_ty;
+
+	let message_wrapper = &info.message_wrapper;
+
+	let (outgoing_from_impl, outgoing_decl) = if let Some(outgoing) = outgoing {
+		let outgoing_variant = outgoing
+			.get_ident()
+			.ok_or_else(||{
+				syn::Error::new(outgoing.span(), "Missing identifier to use as enum variant for outgoing.")
+			})?;
+		(quote! {
+			impl ::std::convert::From< #outgoing > for #message_wrapper {
+				fn from(message: #outgoing) -> Self {
+					#message_wrapper :: #outgoing_variant ( message )
+				}
+			}
+		},
+		quote! {
+			#outgoing_variant ( #outgoing ) ,
+		})
+	} else {
+		(TokenStream::new(), TokenStream::new())
+	};
+
+	let ts = quote! {
+		/// Generated message type wrapper
+		#[allow(missing_docs)]
+		#[derive(Debug)]
+		pub enum #message_wrapper {
+			#(
+				#consumes_variant ( #consumes ),
+			)*
+			#outgoing_decl
+			// dummy message type
+			Empty,
+		}
+
+		impl ::std::convert::From< () > for #message_wrapper {
+			fn from(_: ()) -> Self {
+				#message_wrapper :: Empty
+			}
+		}
+
+		#(
+			impl ::std::convert::From< #consumes > for #message_wrapper {
+				fn from(message: #consumes) -> Self {
+					#message_wrapper :: #consumes_variant ( message )
+				}
+			}
+		)*
+
+		#outgoing_from_impl
+	};
+
+	Ok(ts)
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b3406b62f7eb6b243ab64a6ebc3835b832777aef
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_misc.rs
@@ -0,0 +1,248 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use quote::quote;
+use syn::Ident;
+
+use super::*;
+
+/// Implement the subsystem sender and subsystem context types,
+/// through which subsystems communicate with the overseer and each other.
+pub(crate) fn impl_misc(info: &OverseerInfo) -> proc_macro2::TokenStream {
+	let overseer_name = info.overseer_name.clone();
+	let subsystem_sender_name = Ident::new(&(overseer_name.to_string() + "SubsystemSender"), overseer_name.span());
+	let subsystem_ctx_name = Ident::new(&(overseer_name.to_string() + "SubsystemContext"), overseer_name.span());
+	let consumes = &info.consumes();
+	let signal = &info.extern_signal_ty;
+	let wrapper_message = &info.message_wrapper;
+	let error_ty = &info.extern_error_ty;
+	let support_crate = info.support_crate_name();
+
+	let ts = quote! {
+		/// Connector to send messages towards all subsystems,
+		/// while tracking which signals were already received.
+		#[derive(Debug, Clone)]
+		pub struct #subsystem_sender_name {
+			/// Collection of channels to all subsystems.
+			channels: ChannelsOut,
+			/// Systemwide tick for which signals were received by all subsystems.
+			signals_received: SignalsReceived,
+		}
+
+		/// impl for wrapping message type...
+		#[#support_crate ::async_trait]
+		impl SubsystemSender< #wrapper_message > for #subsystem_sender_name {
+			async fn send_message(&mut self, msg: #wrapper_message) {
+				self.channels.send_and_log_error(self.signals_received.load(), msg).await;
+			}
+
+			async fn send_messages<T>(&mut self, msgs: T)
+			where
+				T: IntoIterator<Item = #wrapper_message> + Send,
+				T::IntoIter: Send,
+			{
+				// This can definitely be optimized if necessary.
+				for msg in msgs {
+					self.send_message(msg).await;
+				}
+			}
+
+			fn send_unbounded_message(&mut self, msg: #wrapper_message) {
+				self.channels.send_unbounded_and_log_error(self.signals_received.load(), msg);
+			}
+		}
+
+		// ... but also implement for all individual messages to avoid
+		// the necessity for manual wrapping, and do the conversion
+		// based on the generated `From::from` impl for the individual variants.
+		#(
+		#[#support_crate ::async_trait]
+		impl SubsystemSender< #consumes > for #subsystem_sender_name {
+			async fn send_message(&mut self, msg: #consumes) {
+				self.channels.send_and_log_error(self.signals_received.load(), #wrapper_message ::from ( msg )).await;
+			}
+
+			async fn send_messages<T>(&mut self, msgs: T)
+			where
+				T: IntoIterator<Item = #consumes> + Send,
+				T::IntoIter: Send,
+			{
+				// This can definitely be optimized if necessary.
+				for msg in msgs {
+					self.send_message(msg).await;
+				}
+			}
+
+			fn send_unbounded_message(&mut self, msg: #consumes) {
+				self.channels.send_unbounded_and_log_error(self.signals_received.load(), #wrapper_message ::from ( msg ));
+			}
+		}
+		)*
+
+		/// A context type that is given to the [`Subsystem`] upon spawning.
+		/// It can be used by [`Subsystem`] to communicate with other [`Subsystem`]s
+		/// or to spawn its [`SubsystemJob`]s.
+		///
+		/// [`Overseer`]: struct.Overseer.html
+		/// [`Subsystem`]: trait.Subsystem.html
+		/// [`SubsystemJob`]: trait.SubsystemJob.html
+		#[derive(Debug)]
+		#[allow(missing_docs)]
+		pub struct #subsystem_ctx_name<M>{
+			signals: #support_crate ::metered::MeteredReceiver< #signal >,
+			messages: SubsystemIncomingMessages<M>,
+			to_subsystems: #subsystem_sender_name,
+			to_overseer: #support_crate ::metered::UnboundedMeteredSender<
+				#support_crate ::ToOverseer
+				>,
+			signals_received: SignalsReceived,
+			pending_incoming: Option<(usize, M)>,
+		}
+
+		impl<M> #subsystem_ctx_name<M> {
+			/// Create a new context.
+			fn new(
+				signals: #support_crate ::metered::MeteredReceiver< #signal >,
+				messages: SubsystemIncomingMessages<M>,
+				to_subsystems: ChannelsOut,
+				to_overseer: #support_crate ::metered::UnboundedMeteredSender<ToOverseer>,
+			) -> Self {
+				let signals_received = SignalsReceived::default();
+				#subsystem_ctx_name {
+					signals,
+					messages,
+					to_subsystems: #subsystem_sender_name {
+						channels: to_subsystems,
+						signals_received: signals_received.clone(),
+					},
+					to_overseer,
+					signals_received,
+					pending_incoming: None,
+				}
+			}
+		}
+
+		#[#support_crate ::async_trait]
+		impl<M: std::fmt::Debug + Send + 'static> SubsystemContext for #subsystem_ctx_name<M>
+		where
+			#subsystem_sender_name: #support_crate ::SubsystemSender< #wrapper_message >,
+			#wrapper_message: From<M>,
+		{
+			type Message = M;
+			type Signal = #signal;
+			type Sender = #subsystem_sender_name;
+			type AllMessages = #wrapper_message;
+			type Error = #error_ty;
+
+			async fn try_recv(&mut self) -> ::std::result::Result<Option<FromOverseer<M, #signal>>, ()> {
+				match #support_crate ::poll!(self.recv()) {
+					#support_crate ::Poll::Ready(msg) => Ok(Some(msg.map_err(|_| ())?)),
+					#support_crate ::Poll::Pending => Ok(None),
+				}
+			}
+
+			async fn recv(&mut self) -> ::std::result::Result<FromOverseer<M, #signal>, #error_ty> {
+				loop {
+					// If we have a message pending an overseer signal, we only poll for signals
+					// in the meantime.
+					if let Some((needs_signals_received, msg)) = self.pending_incoming.take() {
+						if needs_signals_received <= self.signals_received.load() {
+							return Ok(#support_crate ::FromOverseer::Communication { msg });
+						} else {
+							self.pending_incoming = Some((needs_signals_received, msg));
+
+							// wait for next signal.
+							let signal = self.signals.next().await
+								.ok_or(#support_crate ::OverseerError::Context(
+									"Signal channel is terminated and empty."
+									.to_owned()
+								))?;
+
+							self.signals_received.inc();
+							return Ok(#support_crate ::FromOverseer::Signal(signal))
+						}
+					}
+
+					let mut await_message = self.messages.next().fuse();
+					let mut await_signal = self.signals.next().fuse();
+					let signals_received = self.signals_received.load();
+					let pending_incoming = &mut self.pending_incoming;
+
+					// Otherwise, wait for the next signal or incoming message.
+					let from_overseer = #support_crate ::futures::select_biased! {
+						signal = await_signal => {
+							let signal = signal
+								.ok_or(#support_crate ::OverseerError::Context(
+									"Signal channel is terminated and empty."
+									.to_owned()
+								))?;
+
+							#support_crate ::FromOverseer::Signal(signal)
+						}
+						msg = await_message => {
+							let packet = msg
+								.ok_or(#support_crate ::OverseerError::Context(
+									"Message channel is terminated and empty."
+									.to_owned()
+								))?;
+
+							if packet.signals_received > signals_received {
+								// wait until we've received enough signals to return this message.
+								*pending_incoming = Some((packet.signals_received, packet.message));
+								continue;
+							} else {
+								// we know enough to return this message.
+								#support_crate ::FromOverseer::Communication { msg: packet.message}
+							}
+						}
+					};
+
+					if let #support_crate ::FromOverseer::Signal(_) = from_overseer {
+						self.signals_received.inc();
+					}
+
+					return Ok(from_overseer);
+				}
+			}
+
+			fn sender(&mut self) -> &mut Self::Sender {
+				&mut self.to_subsystems
+			}
+
+			fn spawn(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
+				-> ::std::result::Result<(), #error_ty>
+			{
+				self.to_overseer.unbounded_send(#support_crate ::ToOverseer::SpawnJob {
+					name,
+					s,
+				}).map_err(|_| #support_crate ::OverseerError::TaskSpawn(name))?;
+				Ok(())
+			}
+
+			fn spawn_blocking(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
+				-> ::std::result::Result<(), #error_ty>
+			{
+				self.to_overseer.unbounded_send(#support_crate ::ToOverseer::SpawnBlockingJob {
+					name,
+					s,
+				}).map_err(|_| #support_crate ::OverseerError::TaskSpawn(name))?;
+				Ok(())
+			}
+		}
+	};
+
+	ts
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3c578fdf4a518e3ec1f8da7dc3f614cc2109f334
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/impl_overseer.rs
@@ -0,0 +1,265 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use quote::quote;
+
+use super::*;
+
+pub(crate) fn impl_overseer_struct(info: &OverseerInfo) -> proc_macro2::TokenStream {
+	let message_wrapper = &info.message_wrapper.clone();
+	let overseer_name = info.overseer_name.clone();
+	let subsystem_name = &info.subsystem_names_without_wip();
+	let support_crate = info.support_crate_name();
+
+	let baggage_decl = &info.baggage_decl();
+
+	let baggage_generic_ty = &info.baggage_generic_types();
+
+	let generics = quote! {
+		< S, #( #baggage_generic_ty, )* >
+	};
+
+	let where_clause = quote! {
+		where
+			S: #support_crate ::SpawnNamed,
+	};
+	// TODO add `where ..` clauses for baggage types
+	// TODO https://github.com/paritytech/polkadot/issues/3427
+
+	let consumes = &info.consumes_without_wip();
+	let consumes_variant = &info.variant_names_without_wip();
+	let unconsumes_variant = &info.variant_names_only_wip();
+
+	let signal_ty = &info.extern_signal_ty;
+
+	let error_ty = &info.extern_error_ty;
+
+	let event_ty = &info.extern_event_ty;
+
+	let message_channel_capacity = info.message_channel_capacity;
+	let signal_channel_capacity = info.signal_channel_capacity;
+
+	let log_target = syn::LitStr::new(overseer_name.to_string().to_lowercase().as_str(), overseer_name.span());
+
+	let ts = quote! {
+		const STOP_DELAY: ::std::time::Duration = ::std::time::Duration::from_secs(1);
+
+		/// Capacity of a bounded message channel between overseer and subsystem
+		/// but also for bounded channels between two subsystems.
+		const CHANNEL_CAPACITY: usize = #message_channel_capacity;
+
+		/// Capacity of a signal channel between a subsystem and the overseer.
+		const SIGNAL_CHANNEL_CAPACITY: usize = #signal_channel_capacity;
+
+		/// The log target tag.
+		const LOG_TARGET: &'static str = #log_target;
+
+		/// The overseer.
+		pub struct #overseer_name #generics {
+
+			#(
+				/// A subsystem instance.
+				#subsystem_name: OverseenSubsystem< #consumes >,
+			)*
+
+			#(
+				/// A user specified addendum field.
+				#baggage_decl ,
+			)*
+
+			/// Responsible for driving the subsystem futures.
+			spawner: S,
+
+			/// The set of running subsystems.
+			running_subsystems: #support_crate ::FuturesUnordered<
+				BoxFuture<'static, ::std::result::Result<(), #error_ty>>
+			>,
+
+			/// Gather running subsystems' outbound streams into one.
+			to_overseer_rx: #support_crate ::stream::Fuse<
+				#support_crate ::metered::UnboundedMeteredReceiver< ToOverseer >
+			>,
+
+			/// Events that are sent to the overseer from the outside world.
+			events_rx: #support_crate ::metered::MeteredReceiver< #event_ty >,
+		}
+
+		impl #generics #overseer_name #generics #where_clause {
+			/// Send the given signal, a termination signal, to all subsystems
+			/// and wait for all subsystems to go down.
+			///
+			/// The definition of a termination signal is up to the user and
+			/// implementation specific.
+			pub async fn wait_terminate(&mut self, signal: #signal_ty, timeout: ::std::time::Duration) -> ::std::result::Result<(), #error_ty > {
+				#(
+					::std::mem::drop(self. #subsystem_name .send_signal(signal.clone()).await);
+				)*
+				let _ = signal;
+
+				let mut timeout_fut = #support_crate ::Delay::new(
+						timeout
+					).fuse();
+
+				loop {
+					select! {
+						_ = self.running_subsystems.next() => {
+							if self.running_subsystems.is_empty() {
+								break;
+							}
+						},
+						_ = timeout_fut => break,
+						complete => break,
+					}
+				}
+
+				Ok(())
+			}
+
+			/// Broadcast a signal to all subsystems.
+			pub async fn broadcast_signal(&mut self, signal: #signal_ty) -> ::std::result::Result<(), #error_ty > {
+				#(
+					let _ = self. #subsystem_name .send_signal(signal.clone()).await;
+				)*
+				let _ = signal;
+
+				Ok(())
+			}
+
+			/// Route a particular message to a subsystem that consumes the message.
+			pub async fn route_message(&mut self, message: #message_wrapper, origin: &'static str) -> ::std::result::Result<(), #error_ty > {
+				match message {
+					#(
+						#message_wrapper :: #consumes_variant ( inner ) =>
+							OverseenSubsystem::< #consumes >::send_message2(&mut self. #subsystem_name, inner, origin ).await?,
+					)*
+					// subsystems that are still work in progress
+					#(
+						#message_wrapper :: #unconsumes_variant ( _ ) => {}
+					)*
+					#message_wrapper :: Empty => {}
+				}
+				Ok(())
+			}
+
+			/// Extract information from each subsystem.
+			pub fn map_subsystems<'a, Mapper, Output>(&'a self, mapper: Mapper)
+			-> Vec<Output>
+				where
+				#(
+					Mapper: MapSubsystem<&'a OverseenSubsystem< #consumes >, Output=Output>,
+				)*
+			{
+				vec![
+				#(
+					mapper.map_subsystem( & self. #subsystem_name ),
+				)*
+				]
+			}
+
+			/// Get access to internal task spawner.
+			pub fn spawner<'a> (&'a mut self) -> &'a mut S {
+				&mut self.spawner
+			}
+		}
+
+	};
+
+	ts
+}
+
+pub(crate) fn impl_overseen_subsystem(info: &OverseerInfo) -> proc_macro2::TokenStream {
+	let signal = &info.extern_signal_ty;
+	let error_ty = &info.extern_error_ty;
+	let support_crate = info.support_crate_name();
+
+	let ts = quote::quote! {
+
+		use #support_crate ::futures::SinkExt as _;
+
+		/// A subsystem that the overseer oversees.
+		///
+		/// Ties together the [`Subsystem`] itself and its running instance
+		/// (which may be missing if the [`Subsystem`] is not running at the moment
+		/// for whatever reason).
+		///
+		/// [`Subsystem`]: trait.Subsystem.html
+		pub struct OverseenSubsystem<M> {
+			/// The instance.
+			pub instance: std::option::Option<
+				#support_crate ::SubsystemInstance<M, #signal>
+			>,
+		}
+
+		impl<M> OverseenSubsystem<M> {
+			/// Send a message to the wrapped subsystem.
+			///
+			/// If the inner `instance` is `None`, nothing happens.
+			pub async fn send_message2(&mut self, message: M, origin: &'static str) -> ::std::result::Result<(), #error_ty > {
+				const MESSAGE_TIMEOUT: Duration = Duration::from_secs(10);
+
+				if let Some(ref mut instance) = self.instance {
+					match instance.tx_bounded.send(MessagePacket {
+						signals_received: instance.signals_received,
+						message: message.into(),
+					}).timeout(MESSAGE_TIMEOUT).await
+					{
+						None => {
+							#support_crate ::tracing::error!(
+								target: LOG_TARGET,
+								%origin,
+								"Subsystem {} appears unresponsive.",
+								instance.name,
+							);
+							Err(#error_ty :: from(
+								#support_crate ::OverseerError::SubsystemStalled(instance.name)
+							))
+						}
+						Some(res) => res.map_err(Into::into),
+					}
+				} else {
+					Ok(())
+				}
+			}
+
+			/// Send a signal to the wrapped subsystem.
+			///
+			/// If the inner `instance` is `None`, nothing happens.
+			pub async fn send_signal(&mut self, signal: #signal) -> ::std::result::Result<(), #error_ty > {
+				const SIGNAL_TIMEOUT: ::std::time::Duration = ::std::time::Duration::from_secs(10);
+
+				if let Some(ref mut instance) = self.instance {
+					match instance.tx_signal.send(signal).timeout(SIGNAL_TIMEOUT).await {
+						None => {
+							Err(#error_ty :: from(
+								#support_crate ::OverseerError::SubsystemStalled(instance.name)
+							))
+						}
+						Some(res) => {
+							let res = res.map_err(Into::into);
+							if res.is_ok() {
+								instance.signals_received += 1;
+							}
+							res
+						}
+					}
+				} else {
+					Ok(())
+				}
+			}
+		}
+	};
+	ts
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/lib.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9fecfd19241aa68cd8bc08d98cff6c1cbbede4ba
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/lib.rs
@@ -0,0 +1,94 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+#![deny(unused_crate_dependencies)]
+
+use proc_macro2::{Span, Ident, TokenStream};
+use syn::{parse2, Result};
+use quote::{quote, ToTokens};
+
+mod impl_builder;
+mod impl_misc;
+mod impl_overseer;
+mod parse_attr;
+mod parse_struct;
+mod impl_channels_out;
+mod impl_dispatch;
+mod impl_message_wrapper;
+
+use impl_builder::*;
+use impl_channels_out::*;
+use impl_dispatch::*;
+use impl_message_wrapper::*;
+use impl_misc::*;
+use impl_overseer::*;
+use parse_attr::*;
+use parse_struct::*;
+
+#[cfg(test)]
+mod tests;
+
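+/// Generate an overseer from the annotated struct: a wrapping message enum,
+/// a builder, per-subsystem channels and a subsystem context/sender pair.
+///
+/// See the `polkadot-overseer-gen` crate and its `examples/dummy.rs` for the
+/// accepted attribute arguments and field annotations.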
+#[proc_macro_attribute]
+pub fn overlord(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream {
+	let attr: TokenStream = attr.into();
+	let item: TokenStream = item.into();
+	impl_overseer_gen(attr, item).unwrap_or_else(|err| err.to_compile_error()).into()
+}
+
+pub(crate) fn impl_overseer_gen(attr: TokenStream, orig: TokenStream) -> Result<proc_macro2::TokenStream> {
+	let args: AttrArgs = parse2(attr)?;
+	let message_wrapper = args.message_wrapper;
+
+	let of: OverseerGuts = parse2(orig)?;
+
+	let support_crate_name = if cfg!(test) {
+		quote!{crate}
+	} else {
+		use proc_macro_crate::{crate_name, FoundCrate};
+		let crate_name = crate_name("polkadot-overseer-gen")
+			.expect("Support crate polkadot-overseer-gen is present in `Cargo.toml`. qed");
+		match crate_name {
+			FoundCrate::Itself => quote!{crate},
+			FoundCrate::Name(name) => Ident::new(&name, Span::call_site()).to_token_stream(),
+		}
+	};
+	let info = OverseerInfo {
+		support_crate_name,
+		subsystems: of.subsystems,
+		baggage: of.baggage,
+		overseer_name: of.name,
+		message_wrapper,
+		message_channel_capacity: args.message_channel_capacity,
+		signal_channel_capacity: args.signal_channel_capacity,
+		extern_event_ty: args.extern_event_ty,
+		extern_signal_ty: args.extern_signal_ty,
+		extern_error_ty: args.extern_error_ty,
+		extern_network_ty: args.extern_network_ty,
+		outgoing_ty: args.outgoing_ty,
+	};
+
+	let mut additive = impl_overseer_struct(&info);
+	additive.extend(impl_builder(&info));
+
+	additive.extend(impl_overseen_subsystem(&info));
+	additive.extend(impl_channels_out_struct(&info));
+	additive.extend(impl_misc(&info));
+
+	additive.extend(impl_message_wrapper_enum(&info)?);
+	additive.extend(impl_dispatch(&info));
+
+	Ok(additive)
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/parse_attr.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/parse_attr.rs
new file mode 100644
index 0000000000000000000000000000000000000000..60fa6ab227654a575a69668efc0d2efbae1dae7b
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/parse_attr.rs
@@ -0,0 +1,227 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use proc_macro2::Span;
+use std::collections::{hash_map::RandomState, HashMap};
+use syn::parse::{Parse, ParseBuffer};
+use syn::punctuated::Punctuated;
+use syn::spanned::Spanned;
+use syn::{Error, Ident, LitInt, Path, Result, Token};
+use quote::{quote, ToTokens};
+
+mod kw {
+	syn::custom_keyword!(event);
+	syn::custom_keyword!(signal);
+	syn::custom_keyword!(error);
+	syn::custom_keyword!(network);
+	syn::custom_keyword!(outgoing);
+	syn::custom_keyword!(gen);
+	syn::custom_keyword!(signal_capacity);
+	syn::custom_keyword!(message_capacity);
+}
+
+
+#[derive(Clone, Debug)]
+enum OverseerAttrItem {
+	ExternEventType {
+		tag: kw::event,
+		eq_token: Token![=],
+		value: Path
+	},
+	ExternNetworkType {
+		tag: kw::network,
+		eq_token: Token![=],
+		value: Path
+	},
+	ExternOverseerSignalType {
+		tag: kw::signal,
+		eq_token: Token![=],
+		value: Path
+	},
+	ExternErrorType {
+		tag: kw::error,
+		eq_token: Token![=],
+		value: Path
+	},
+	OutgoingType {
+		tag: kw::outgoing,
+		eq_token: Token![=],
+		value: Path
+	},
+	MessageWrapperName {
+		tag: kw::gen,
+		eq_token: Token![=],
+		value: Ident
+	},
+	SignalChannelCapacity {
+		tag: kw::signal_capacity,
+		eq_token: Token![=],
+		value: usize
+	},
+	MessageChannelCapacity {
+		tag: kw::message_capacity,
+		eq_token: Token![=],
+		value: usize
+	},
+}
+
+impl ToTokens for OverseerAttrItem {
+	fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
+		let ts = match self {
+			Self::ExternEventType { tag, eq_token, value } => { quote!{ #tag #eq_token #value } }
+			Self::ExternNetworkType { tag, eq_token, value } => { quote!{ #tag #eq_token #value } }
+			Self::ExternOverseerSignalType { tag, eq_token, value } => { quote!{ #tag #eq_token #value } }
+			Self::ExternErrorType { tag, eq_token, value } => { quote!{ #tag #eq_token #value } }
+			Self::OutgoingType { tag, eq_token, value } => { quote!{ #tag #eq_token #value } }
+			Self::MessageWrapperName { tag, eq_token, value } => { quote!{ #tag #eq_token #value } }
+			Self::SignalChannelCapacity { tag, eq_token, value } => { quote!{ #tag #eq_token #value } }
+			Self::MessageChannelCapacity { tag, eq_token, value } => { quote!{ #tag #eq_token #value } }
+		};
+		tokens.extend(ts.into_iter());
+	}
+}
+
+impl Parse for OverseerAttrItem {
+	fn parse(input: &ParseBuffer) -> Result<Self> {
+		let lookahead = input.lookahead1();
+		if lookahead.peek(kw::event) {
+			Ok(OverseerAttrItem::ExternEventType {
+				tag: input.parse::<kw::event>()?,
+				eq_token: input.parse()?,
+				value: input.parse()?,
+			})
+		} else if lookahead.peek(kw::signal) {
+			Ok(OverseerAttrItem::ExternOverseerSignalType {
+				tag: input.parse::<kw::signal>()?,
+				eq_token: input.parse()?,
+				value: input.parse()?,
+			})
+		} else if lookahead.peek(kw::error) {
+			Ok(OverseerAttrItem::ExternErrorType {
+				tag: input.parse::<kw::error>()?,
+				eq_token: input.parse()?,
+				value: input.parse()?,
+			})
+		} else if lookahead.peek(kw::network) {
+			Ok(OverseerAttrItem::ExternNetworkType {
+				tag: input.parse::<kw::network>()?,
+				eq_token: input.parse()?,
+				value: input.parse()?,
+			})
+		} else if lookahead.peek(kw::outgoing) {
+			Ok(OverseerAttrItem::OutgoingType {
+				tag: input.parse::<kw::outgoing>()?,
+				eq_token: input.parse()?,
+				value: input.parse()?,
+			})
+		} else if lookahead.peek(kw::gen) {
+			Ok(OverseerAttrItem::MessageWrapperName {
+				tag: input.parse::<kw::gen>()?,
+				eq_token: input.parse()?,
+				value: input.parse()?,
+			})
+		} else if lookahead.peek(kw::signal_capacity) {
+			Ok(OverseerAttrItem::SignalChannelCapacity {
+				tag: input.parse::<kw::signal_capacity>()?,
+				eq_token: input.parse()?,
+				value: input.parse::<LitInt>()?.base10_parse::<usize>()?
+			})
+		} else if lookahead.peek(kw::message_capacity) {
+			Ok(OverseerAttrItem::MessageChannelCapacity {
+				tag: input.parse::<kw::message_capacity>()?,
+				eq_token: input.parse()?,
+				value: input.parse::<LitInt>()?.base10_parse::<usize>()?,
+			})
+		} else {
+			Err(lookahead.error())
+		}
+	}
+}
+
+/// Attribute arguments
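+///
+/// Parsed from `#[overlord(..)]`: the required `signal=..`, `event=..`,
+/// `error=..` and `gen=..` items plus the optional `network=..`,
+/// `outgoing=..`, `signal_capacity=..` and `message_capacity=..` items.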
+#[derive(Clone, Debug)]
+pub(crate) struct AttrArgs {
+	pub(crate) message_wrapper: Ident,
+	pub(crate) extern_event_ty: Path,
+	pub(crate) extern_signal_ty: Path,
+	pub(crate) extern_error_ty: Path,
+	/// An external subsystem that both consumes and produces messages,
+	/// but is not part of the band of subsystems; it is merely a proxy
+	/// to another entity that consumes/produces messages.
+	pub(crate) extern_network_ty: Option<Path>,
+	pub(crate) outgoing_ty: Option<Path>,
+	pub(crate) signal_channel_capacity: usize,
+	pub(crate) message_channel_capacity: usize,
+}
+
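+// `extract_variant!` looks up the value stored in a given `OverseerAttrItem`
+// variant: the plain form yields an `Option`, the `default =` form falls back
+// to the provided value and the `err =` form turns a missing item into a
+// `syn::Error` at the call site span.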
+macro_rules! extract_variant {
+	($unique:expr, $variant:ident ; default = $fallback:expr) => {
+		extract_variant!($unique, $variant)
+			.unwrap_or_else(|| { $fallback })
+	};
+	($unique:expr, $variant:ident ; err = $err:expr) => {
+		extract_variant!($unique, $variant)
+			.ok_or_else(|| {
+				Error::new(Span::call_site(), $err)
+			})
+	};
+	($unique:expr, $variant:ident) => {
+		$unique.values()
+			.find_map(|item| {
+				if let OverseerAttrItem:: $variant { value, ..} = item {
+					Some(value.clone())
+				} else {
+					None
+				}
+			})
+	};
+}
+
+impl Parse for AttrArgs {
+	fn parse(input: &ParseBuffer) -> Result<Self> {
+		let items: Punctuated<OverseerAttrItem, Token![,]> = input.parse_terminated(OverseerAttrItem::parse)?;
+
+		let mut unique = HashMap::<std::mem::Discriminant<OverseerAttrItem>, OverseerAttrItem, RandomState>::default();
+		for item in items {
+			if let Some(first) = unique.insert(std::mem::discriminant(&item), item.clone()) {
+				let mut e = Error::new(item.span(), "Duplicate definition of overseer generation type found");
+				e.combine(Error::new(first.span(), "previously defined here."));
+				return Err(e);
+			}
+		}
+
+		let signal_channel_capacity = extract_variant!(unique, SignalChannelCapacity; default = 64_usize);
+		let message_channel_capacity = extract_variant!(unique, MessageChannelCapacity; default = 1024_usize);
+
+		let error = extract_variant!(unique, ExternErrorType; err = "Must declare the overseer error type via `error=..`.")?;
+		let event = extract_variant!(unique, ExternEventType; err = "Must declare the overseer event type via `event=..`.")?;
+		let signal = extract_variant!(unique, ExternOverseerSignalType; err = "Must declare the overseer signal type via `signal=..`.")?;
+		let message_wrapper = extract_variant!(unique, MessageWrapperName; err = "Must declare the overseer generated wrapping message type via `gen=..`.")?;
+		let network = extract_variant!(unique, ExternNetworkType);
+		let outgoing = extract_variant!(unique, OutgoingType);
+
+		Ok(AttrArgs {
+			signal_channel_capacity,
+			message_channel_capacity,
+			extern_event_ty: event,
+			extern_signal_ty: signal,
+			extern_error_ty: error,
+			extern_network_ty: network,
+			outgoing_ty: outgoing,
+			message_wrapper,
+		})
+	}
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/parse_struct.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/parse_struct.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2448cfb143b222b2c457366ec773ea83450ed825
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/parse_struct.rs
@@ -0,0 +1,431 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use proc_macro2::{Span, TokenStream};
+use std::collections::{hash_map::RandomState, HashSet, HashMap};
+use syn::punctuated::Punctuated;
+use syn::spanned::Spanned;
+use syn::parse::{Parse, ParseStream};
+use syn::{
+	Attribute, Field, FieldsNamed, Ident, Token, Type, AttrStyle, Path,
+	Error, GenericParam, ItemStruct, Result, Visibility
+};
+
+use quote::{quote, ToTokens};
+
+mod kw {
+	syn::custom_keyword!(wip);
+	syn::custom_keyword!(no_dispatch);
+	syn::custom_keyword!(blocking);
+}
+
+
+#[derive(Clone, Debug)]
+enum SubSysAttrItem {
+	/// The subsystem is still a work in progress
+	/// and should not be communicated with.
+	Wip(kw::wip),
+	/// The subsystem is blocking and requires to be
+	/// spawned on an exclusive thread.
+	Blocking(kw::blocking),
+	/// External messages should not be dispatched to the
+	/// annotated subsystem after being converted.
+	NoDispatch(kw::no_dispatch),
+}
+
+impl Parse for SubSysAttrItem {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let lookahead = input.lookahead1();
+		Ok(if lookahead.peek(kw::wip) {
+			Self::Wip(input.parse::<kw::wip>()?)
+		} else if lookahead.peek(kw::blocking) {
+			Self::Blocking(input.parse::<kw::blocking>()?)
+		} else if lookahead.peek(kw::no_dispatch) {
+			Self::NoDispatch(input.parse::<kw::no_dispatch>()?)
+		} else {
+			return Err(lookahead.error())
+		})
+	}
+}
+
+impl ToTokens for SubSysAttrItem {
+	fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
+		let ts = match self {
+			Self::Wip(wip) => { quote!{ #wip } }
+			Self::Blocking(blocking) => { quote!{ #blocking } }
+			Self::NoDispatch(no_dispatch) => { quote!{ #no_dispatch } }
+		};
+		tokens.extend(ts.into_iter());
+	}
+}
+
+
+/// A field of the struct annotated with
+/// `#[subsystem(no_dispatch, A | B | C)]`
+#[derive(Clone, Debug)]
+pub(crate) struct SubSysField {
+	/// Name of the field.
+	pub(crate) name: Ident,
+	/// Generated generic type name for the `AllSubsystems` type,
+	/// which is also used as the `#wrapper_message :: #variant`
+	/// variant part.
+	pub(crate) generic: Ident,
+	/// Type to be consumed by the subsystem.
+	pub(crate) consumes: Path,
+	/// If `no_dispatch` is present and the message is incoming via
+	/// an extern `Event`, it will not be dispatched to all subsystems.
+	pub(crate) no_dispatch: bool,
+	/// If the subsystem implementation is blocking execution and hence
+	/// has to be spawned on a separate thread or thread pool.
+	pub(crate) blocking: bool,
+	/// The subsystem is a work in progress.
+	/// Avoids dispatching `Wrapper` type messages, but generates the variants.
+	/// Does not require the subsystem to be instantiated with the builder pattern.
+	pub(crate) wip: bool,
+}
+
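+/// Convert a field type into a `Path`, rejecting anything that is not a plain path type.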
+fn try_type_to_path(ty: Type, span: Span) -> Result<Path> {
+	match ty {
+		Type::Path(path) => Ok(path.path),
+		_ => Err(Error::new(span, "Type must be a path expression.")),
+	}
+}
+
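+/// Helper to check for the presence of a `SubSysAttrItem` variant in the de-duplicated
+/// item map: the plain form yields an `Option<bool>`, `default =` substitutes a fallback,
+/// and `err =` converts absence into an `Error` at the call site.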
+macro_rules! extract_variant {
+	($unique:expr, $variant:ident ; default = $fallback:expr) => {
+		extract_variant!($unique, $variant)
+			.unwrap_or_else(|| { $fallback })
+	};
+	($unique:expr, $variant:ident ; err = $err:expr) => {
+		extract_variant!($unique, $variant)
+			.ok_or_else(|| {
+				Error::new(Span::call_site(), $err)
+			})
+	};
+	($unique:expr, $variant:ident) => {
+		$unique.values()
+			.find_map(|item| {
+				if let SubSysAttrItem:: $variant ( _ ) = item {
+					Some(true)
+				} else {
+					None
+				}
+			})
+	};
+}
+
+
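+/// The parsed tags of one `#[subsystem(..)]` annotation, e.g.
+/// `#[subsystem(no_dispatch, blocking, MessageType)]`: zero or more
+/// modifiers followed by the message type the subsystem consumes
+/// (`MessageType` here is only an illustrative placeholder).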
+pub(crate) struct SubSystemTags {
+	#[allow(dead_code)]
+	pub(crate) attrs: Vec<Attribute>,
+	#[allow(dead_code)]
+	pub(crate) no_dispatch: bool,
+	/// The subsystem is WIP, only generate the `Wrapper` variant, but do not forward messages
+	/// and also not include the subsystem in the list of subsystems.
+	pub(crate) wip: bool,
+	pub(crate) blocking: bool,
+	pub(crate) consumes: Path,
+}
+
+impl Parse for SubSystemTags {
+	fn parse(input: syn::parse::ParseStream) -> Result<Self> {
+		let attrs = Attribute::parse_outer(input)?;
+
+		let input = input;
+		let content;
+		let _ = syn::parenthesized!(content in input);
+
+		let mut items = Punctuated::new();
+		while let Ok(tag) = content.call(SubSysAttrItem::parse) {
+			items.push_value(tag);
+			items.push_punct(content.call(<Token![,]>::parse)?);
+		}
+
+		assert!(items.empty_or_trailing(), "Always followed by the message type to consume. qed");
+
+		let consumes = content.parse::<Path>()?;
+
+		let mut unique = HashMap::<std::mem::Discriminant<SubSysAttrItem>, SubSysAttrItem, RandomState>::default();
+		for item in items {
+			if let Some(first) = unique.insert(std::mem::discriminant(&item), item.clone()) {
+				let mut e = Error::new(item.span(), format!("Duplicate definition of subsystem attribute found"));
+				e.combine(Error::new(first.span(), "previously defined here."));
+				return Err(e);
+			}
+		}
+
+		let no_dispatch = extract_variant!(unique, NoDispatch; default = false);
+		let blocking = extract_variant!(unique, Blocking; default = false);
+		let wip = extract_variant!(unique, Wip; default = false);
+
+		Ok(Self { attrs, no_dispatch, blocking, consumes, wip })
+	}
+}
+
+/// Fields that are _not_ subsystems.
+#[derive(Debug, Clone)]
+pub(crate) struct BaggageField {
+	pub(crate) field_name: Ident,
+	pub(crate) field_ty: Path,
+	pub(crate) generic: bool,
+	pub(crate) vis: Visibility,
+}
+
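+/// Everything the code generation needs to know about the annotated overseer:
+/// the parsed struct fields combined with the attribute arguments.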
+#[derive(Clone, Debug)]
+pub(crate) struct OverseerInfo {
+	/// Where the support crate `::polkadot_overseer_gen` lives.
+	pub(crate) support_crate_name: TokenStream,
+
+	/// Fields annotated with `#[subsystem(..)]`.
+	pub(crate) subsystems: Vec<SubSysField>,
+	/// Fields that do not define a subsystem,
+	/// but are mere baggage.
+	pub(crate) baggage: Vec<BaggageField>,
+	/// Name of the wrapping enum for all messages, defaults to `AllMessages`.
+	pub(crate) message_wrapper: Ident,
+	/// Name of the overseer struct, used as a prefix for
+	/// almost all generated types.
+	pub(crate) overseer_name: Ident,
+
+	/// Size of the bounded channel.
+	pub(crate) message_channel_capacity: usize,
+	/// Size of the bounded signal channel.
+	pub(crate) signal_channel_capacity: usize,
+
+	/// Signals to be sent, sparse information that is used intermittently.
+	pub(crate) extern_signal_ty: Path,
+
+	/// Incoming event type from the outer world, usually an external framework of some sort.
+	pub(crate) extern_event_ty: Path,
+
+	/// Incoming event type from an external entity, commonly from the network.
+	pub(crate) extern_network_ty: Option<Path>,
+
+	/// Type of messages that are sent to an external subsystem.
+	/// Merely here to be included during generation of `message_wrapper` type.
+	pub(crate) outgoing_ty: Option<Path>,
+
+	/// The error type used by the overseer, as declared via the `error=..` attribute argument.
+	pub(crate) extern_error_ty: Path,
+}
+
+impl OverseerInfo {
+	pub(crate) fn support_crate_name(&self) -> &TokenStream {
+		&self.support_crate_name
+	}
+
+	pub(crate) fn variant_names(&self) -> Vec<Ident> {
+		self.subsystems
+			.iter()
+			.map(|ssf| ssf.generic.clone())
+			.collect::<Vec<_>>()
+	}
+
+	pub(crate) fn variant_names_without_wip(&self) -> Vec<Ident> {
+		self.subsystems
+			.iter()
+			.filter(|ssf| !ssf.wip)
+			.map(|ssf| ssf.generic.clone())
+			.collect::<Vec<_>>()
+	}
+
+	pub(crate) fn variant_names_only_wip(&self) -> Vec<Ident> {
+		self.subsystems
+			.iter()
+			.filter(|ssf| ssf.wip)
+			.map(|ssf| ssf.generic.clone())
+			.collect::<Vec<_>>()
+	}
+
+	pub(crate) fn subsystems(&self) -> &[SubSysField] {
+		self.subsystems.as_slice()
+	}
+
+	pub(crate) fn subsystem_names_without_wip(&self) -> Vec<Ident> {
+		self.subsystems
+			.iter()
+			.filter(|ssf| !ssf.wip)
+			.map(|ssf| ssf.name.clone())
+			.collect::<Vec<_>>()
+	}
+
+	pub(crate) fn baggage_names(&self) -> Vec<Ident> {
+		self.baggage.iter().map(|bag| bag.field_name.clone()).collect::<Vec<_>>()
+	}
+	pub(crate) fn baggage_types(&self) -> Vec<Path> {
+		self.baggage.iter().map(|bag| bag.field_ty.clone()).collect::<Vec<_>>()
+	}
+	pub(crate) fn baggage_decl(&self) -> Vec<TokenStream> {
+		self.baggage
+			.iter()
+			.map(|bag| {
+				let BaggageField {
+					vis,
+					field_ty,
+					field_name,
+					..
+				} = bag;
+				quote!{ #vis #field_name: #field_ty }
+			})
+			.collect::<Vec<TokenStream>>()
+	}
+
+	/// Generic types per subsystem, as defined by the user.
+	pub(crate) fn builder_generic_types(&self) -> Vec<Ident> {
+		self.subsystems
+			.iter()
+			.filter(|ssf| !ssf.wip)
+			.map(|sff| sff.generic.clone())
+			.collect::<Vec<_>>()
+	}
+
+	pub(crate) fn baggage_generic_types(&self) -> Vec<Ident> {
+		self.baggage
+			.iter()
+			.filter(|bag| bag.generic)
+			.filter_map(|bag| bag.field_ty.get_ident().cloned())
+			.collect::<Vec<_>>()
+	}
+
+	pub(crate) fn consumes(&self) -> Vec<Path> {
+		self.subsystems.iter().map(|ssf| ssf.consumes.clone()).collect::<Vec<_>>()
+	}
+
+	pub(crate) fn channel_names_without_wip(&self, suffix: &'static str) -> Vec<Ident> {
+		self.subsystems
+			.iter()
+			.filter(|ssf| !ssf.wip)
+			.map(|ssf| Ident::new(&(ssf.name.to_string() + suffix), ssf.name.span()))
+			.collect::<Vec<_>>()
+	}
+
+	pub(crate) fn consumes_without_wip(&self) -> Vec<Path> {
+		self.subsystems
+			.iter()
+			.filter(|ssf| !ssf.wip)
+			.map(|ssf| ssf.consumes.clone())
+			.collect::<Vec<_>>()
+	}
+}
+
+/// Internals of the overseer.
+#[derive(Debug, Clone)]
+pub(crate) struct OverseerGuts {
+	pub(crate) name: Ident,
+	pub(crate) subsystems: Vec<SubSysField>,
+	pub(crate) baggage: Vec<BaggageField>,
+}
+
+impl OverseerGuts {
+	pub(crate) fn parse_fields(name: Ident, baggage_generics: HashSet<Ident>, fields: FieldsNamed) -> Result<Self> {
+		let n = fields.named.len();
+		let mut subsystems = Vec::with_capacity(n);
+		let mut baggage = Vec::with_capacity(n);
+
+		// The types of `#[subsystem(..)]` annotated fields
+		// have to be unique, since they are used as generics
+		// for the builder pattern besides other places.
+		let mut unique_subsystem_idents = HashSet::<Ident>::new();
+		for Field { attrs, vis, ident, ty, .. } in fields.named.into_iter() {
+			let mut consumes = attrs.iter().filter(|attr| attr.style == AttrStyle::Outer).filter_map(|attr| {
+				let span = attr.path.span();
+				attr.path.get_ident().filter(|ident| *ident == "subsystem").map(move |_ident| {
+					let attr_tokens = attr.tokens.clone();
+					(attr_tokens, span)
+				})
+			});
+			let ident = ident.ok_or_else(|| Error::new(ty.span(), "Missing identifier for member. BUG"))?;
+
+			if let Some((attr_tokens, span)) = consumes.next() {
+				if let Some((_attr_tokens2, span2)) = consumes.next() {
+					return Err({
+						let mut err = Error::new(span, "The first subsystem annotation is at");
+						err.combine(Error::new(span2, "but another here for the same field."));
+						err
+					});
+				}
+				let mut consumes_paths = Vec::with_capacity(attrs.len());
+				let attr_tokens = attr_tokens.clone();
+				let variant: SubSystemTags = syn::parse2(attr_tokens.clone())?;
+				consumes_paths.push(variant.consumes);
+
+				let field_ty = try_type_to_path(ty, span)?;
+				let generic = field_ty.get_ident().ok_or_else(|| Error::new(field_ty.span(), "Must be an identifier, not a path."))?.clone();
+				if let Some(previous) = unique_subsystem_idents.get(&generic) {
+					let mut e = Error::new(generic.span(), format!("Duplicate subsystem names `{}`", generic));
+					e.combine(Error::new(previous.span(), "previously defined here."));
+					return Err(e)
+				}
+				unique_subsystem_idents.insert(generic.clone());
+
+				subsystems.push(SubSysField {
+					name: ident,
+					generic,
+					consumes: consumes_paths[0].clone(),
+					no_dispatch: variant.no_dispatch,
+					wip: variant.wip,
+					blocking: variant.blocking,
+				});
+			} else {
+				let field_ty = try_type_to_path(ty, ident.span())?;
+				let generic = field_ty.get_ident().map(|ident| baggage_generics.contains(ident)).unwrap_or_default();
+				baggage.push(BaggageField { field_name: ident, generic, field_ty, vis });
+			}
+		}
+		Ok(Self { name, subsystems, baggage })
+	}
+}
+
+impl Parse for OverseerGuts {
+	fn parse(input: ParseStream) -> Result<Self> {
+		let ds: ItemStruct = input.parse()?;
+		match ds.fields {
+			syn::Fields::Named(named) => {
+				let name = ds.ident.clone();
+
+				// collect the independent subsystem generics
+				// which need to be carried along; these are the non-generated ones
+				let mut orig_generics = ds.generics;
+
+				// remove defaults from types
+				let mut baggage_generic_idents = HashSet::with_capacity(orig_generics.params.len());
+				orig_generics.params = orig_generics
+					.params
+					.into_iter()
+					.map(|mut generic| {
+						match generic {
+							GenericParam::Type(ref mut param) => {
+								baggage_generic_idents.insert(param.ident.clone());
+								param.eq_token = None;
+								param.default = None;
+							}
+							_ => {}
+						}
+						generic
+					})
+					.collect();
+
+				Self::parse_fields(name, baggage_generic_idents, named)
+			}
+			syn::Fields::Unit => {
+				Err(Error::new(ds.fields.span(), "Must be a struct with named fields. Not a unit struct."))
+			}
+			syn::Fields::Unnamed(unnamed) => {
+				Err(Error::new(unnamed.span(), "Must be a struct with named fields. Not a struct with unnamed fields."))
+			}
+		}
+	}
+}
diff --git a/polkadot/node/overseer/overseer-gen/proc-macro/src/tests.rs b/polkadot/node/overseer/overseer-gen/proc-macro/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..40df210fb6f091a50711fe9d58bda291c7d41b4f
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/proc-macro/src/tests.rs
@@ -0,0 +1,117 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use super::*;
+use assert_matches::assert_matches;
+use quote::quote;
+use syn::parse_quote;
+
+#[test]
+fn print() {
+	let attr = quote! {
+		gen=AllMessage,
+		event=::some::why::ExternEvent,
+		signal=SigSigSig,
+		signal_capacity=111,
+		message_capacity=222,
+		error=OverseerError,
+	};
+
+	let item = quote! {
+		pub struct Ooooh<X = Pffffffft> where X: Secrit {
+			#[subsystem(no_dispatch, Foo)]
+			sub0: FooSubsystem,
+
+			#[subsystem(blocking, Bar)]
+			yyy: BaersBuyBilliardBalls,
+
+			#[subsystem(no_dispatch, blocking, Twain)]
+			fff: Beeeeep,
+
+			#[subsystem(Rope)]
+			mc: MountainCave,
+
+			metrics: Metrics,
+		}
+	};
+
+	let output = impl_overseer_gen(attr, item).expect("Simple example always works. qed");
+	println!("//generated:");
+	println!("{}", output);
+}
+
+#[test]
+fn struct_parse_full() {
+	let item: OverseerGuts = parse_quote! {
+		pub struct Ooooh<X = Pffffffft> where X: Secrit {
+			#[subsystem(no_dispatch, Foo)]
+			sub0: FooSubsystem,
+
+			#[subsystem(blocking, Bar)]
+			yyy: BaersBuyBilliardBalls,
+
+			#[subsystem(no_dispatch, blocking, Twain)]
+			fff: Beeeeep,
+
+			#[subsystem(Rope)]
+			mc: MountainCave,
+
+			metrics: Metrics,
+		}
+	};
+	let _ = dbg!(item);
+}
+
+#[test]
+fn struct_parse_basic() {
+	let item: OverseerGuts = parse_quote! {
+		pub struct Ooooh {
+			#[subsystem(Foo)]
+			sub0: FooSubsystem,
+		}
+	};
+	let _ = dbg!(item);
+}
+
+#[test]
+fn attr_full() {
+	let attr: AttrArgs = parse_quote! {
+		gen=AllMessage, event=::some::why::ExternEvent, signal=SigSigSig, signal_capacity=111, message_capacity=222,
+		error=OverseerError,
+	};
+	assert_matches!(attr, AttrArgs {
+		message_channel_capacity,
+		signal_channel_capacity,
+		..
+	} => {
+		assert_eq!(message_channel_capacity, 222);
+		assert_eq!(signal_channel_capacity, 111);
+	});
+}
+
+#[test]
+fn attr_partial() {
+	let attr: AttrArgs = parse_quote! {
+		gen=AllMessage, event=::some::why::ExternEvent, signal=::foo::SigSigSig,
+		error=OverseerError,
+	};
+	assert_matches!(attr, AttrArgs {
+		message_channel_capacity: _,
+		signal_channel_capacity: _,
+		..
+	} => {
+	});
+}
diff --git a/polkadot/node/overseer/overseer-gen/src/lib.rs b/polkadot/node/overseer/overseer-gen/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4b6abdb2b20a1adec7ae30d8fbf909c12a0f01d3
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/src/lib.rs
@@ -0,0 +1,516 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! # Overseer
+//!
+//! `overseer` implements the Overseer architecture described in the
+//! [implementers-guide](https://w3f.github.io/parachain-implementers-guide/node/index.html).
+//! For the motivations behind implementing the overseer itself you should
+//! check out that guide, documentation in this crate will be mostly discussing
+//! technical stuff.
+//!
+//! An `Overseer` is something that allows spawning/stopping and overseeing
+//! asynchronous tasks as well as establishing a well-defined and easy to use
+//! protocol that the tasks can use to communicate with each other. It is desired
+//! that this protocol is the only way tasks communicate with each other, however
+//! at this moment there are no foolproof guards against other ways of communication.
+//!
+//! The `Overseer` is instantiated with a pre-defined set of `Subsystems` that
+//! share the same behavior from `Overseer`'s point of view.
+//!
+//! ```text
+//!                              +-----------------------------+
+//!                              |         Overseer            |
+//!                              +-----------------------------+
+//!
+//!             ................|  Overseer "holds" these and uses |..............
+//!             .                  them to (re)start things                      .
+//!             .                                                                .
+//!             .  +-------------------+                +---------------------+  .
+//!             .  |   Subsystem1      |                |   Subsystem2        |  .
+//!             .  +-------------------+                +---------------------+  .
+//!             .           |                                       |            .
+//!             ..................................................................
+//!                         |                                       |
+//!                       start()                                 start()
+//!                         V                                       V
+//!             ..................| Overseer "runs" these |.......................
+//!             .  +--------------------+               +---------------------+  .
+//!             .  | SubsystemInstance1 | <-- bidir --> | SubsystemInstance2  |  .
+//!             .  +--------------------+               +---------------------+  .
+//!             ..................................................................
+//! ```
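+//!
+//! As a rough sketch, modelled on the UI tests of this crate (where `SigSigSig`, `Event`,
+//! `MsgStrukt` and `AwesomeSubSys` are user-provided placeholder types), an overseer is
+//! declared by annotating a struct with the `overlord` attribute macro:
+//!
+//! ```rust,ignore
+//! #[overlord(signal=SigSigSig, event=Event, gen=AllMessages, error=OverseerError)]
+//! struct Overseer {
+//!     #[subsystem(MsgStrukt)]
+//!     sub0: AwesomeSubSys,
+//! }
+//! ```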
+
+// #![deny(unused_results)]
+// unused dependencies cannot work for tests and examples at the same time
+// yielding false positives
+#![deny(missing_docs)]
+#![deny(unused_crate_dependencies)]
+
+pub use polkadot_overseer_gen_proc_macro::overlord;
+
+#[doc(hidden)]
+pub use tracing;
+#[doc(hidden)]
+pub use metered;
+#[doc(hidden)]
+pub use sp_core::traits::SpawnNamed;
+
+#[doc(hidden)]
+pub use futures::{
+	self,
+	select,
+	StreamExt,
+	FutureExt,
+	poll,
+	future::{
+		Fuse, Future, BoxFuture
+	},
+	stream::{
+		self, select, FuturesUnordered,
+	},
+	task::{
+		Poll, Context,
+	},
+	channel::{mpsc, oneshot},
+};
+#[doc(hidden)]
+pub use std::pin::Pin;
+#[doc(hidden)]
+pub use async_trait::async_trait;
+
+#[doc(hidden)]
+pub use std::time::Duration;
+use std::sync::{Arc, atomic::{self, AtomicUsize}};
+
+#[doc(hidden)]
+pub use futures_timer::Delay;
+
+pub use polkadot_node_network_protocol::WrongVariant;
+
+use std::fmt;
+
+
+#[cfg(test)]
+mod tests;
+
+/// A type of messages that are sent from [`Subsystem`] to [`Overseer`].
+///
+/// Used to launch jobs.
+pub enum ToOverseer {
+	/// A message that wraps something the `Subsystem` wants to
+	/// spawn on the overseer.
+	SpawnJob {
+		/// Name of the task to spawn, which will be shown in jaeger and tracing logs.
+		name: &'static str,
+		/// The future to execute.
+		s: BoxFuture<'static, ()>,
+	},
+
+	/// Same as `SpawnJob` but for blocking tasks to be executed on a
+	/// dedicated thread pool.
+	SpawnBlockingJob {
+		/// Name of the task to spawn, which will be shown in jaeger and tracing logs.
+		name: &'static str,
+		/// The future to execute.
+		s: BoxFuture<'static, ()>,
+	},
+}
+
+impl fmt::Debug for ToOverseer {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		match self {
+			Self::SpawnJob{ name, .. } => writeln!(f, "SpawnJob{{ {}, ..}}", name),
+			Self::SpawnBlockingJob{ name, .. } => writeln!(f, "SpawnBlockingJob{{ {}, ..}}", name),
+		}
+	}
+
+}
+
+
+
+/// A helper trait to map a subsystem to something else.
+pub trait MapSubsystem<T> {
+	/// The output type of the mapping.
+	type Output;
+
+	/// Consumes a `T` per subsystem, and maps it to `Self::Output`.
+	fn map_subsystem(&self, sub: T) -> Self::Output;
+}
+
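+// Blanket impl: any unary closure or function `Fn(T) -> U` can serve as a `MapSubsystem<T>`.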
+impl<F, T, U> MapSubsystem<T> for F where F: Fn(T) -> U {
+	type Output = U;
+
+	fn map_subsystem(&self, sub: T) -> U {
+		(self)(sub)
+	}
+}
+
+/// A wrapping type for messages.
+///
+/// Includes a counter to synchronize signals with messages,
+/// such that inconsistent message sequences are prevented.
+#[derive(Debug)]
+pub struct MessagePacket<T> {
+	/// Signal level at the point of reception.
+	///
+	/// Required to assure signals were consumed _before_
+	/// consuming messages that are based on the assumption
+	/// that a certain signal was received.
+	pub signals_received: usize,
+	/// The message to be sent/consumed.
+	pub message: T,
+}
+
+/// Create a packet from its parts.
+pub fn make_packet<T>(signals_received: usize, message: T) -> MessagePacket<T> {
+	MessagePacket {
+		signals_received,
+		message,
+	}
+}
+
+/// Incoming messages from both the bounded and unbounded channel.
+pub type SubsystemIncomingMessages<M> = self::stream::Select<
+	self::metered::MeteredReceiver<MessagePacket<M>>,
+	self::metered::UnboundedMeteredReceiver<MessagePacket<M>>,
+>;
+
+
+/// Watermark to track the received signals.
+#[derive(Debug, Default, Clone)]
+pub struct SignalsReceived(Arc<AtomicUsize>);
+
+impl SignalsReceived {
+	/// Load the current value of received signals.
+	pub fn load(&self) -> usize {
+		// off by a few is ok
+		self.0.load(atomic::Ordering::Relaxed)
+	}
+
+	/// Increase the number of signals by one.
+	pub fn inc(&self) {
+		self.0.fetch_add(1, atomic::Ordering::Acquire);
+	}
+}
+
+
+
+/// A trait to support the origin annotation
+/// such that errors across subsystems can be easier tracked.
+pub trait AnnotateErrorOrigin: 'static + Send + Sync + std::error::Error {
+	/// Annotate the error with an origin `str`.
+	///
+	/// Commonly this is used to create nested enum variants.
+	///
+	/// ```rust,ignore
+	/// E::WithOrigin("I am originally from Cowtown.", E::Variant)
+	/// ```
+	fn with_origin(self, origin: &'static str) -> Self;
+}
+
+/// An asynchronous subsystem task.
+///
+/// In essence it's just a newtype wrapping a `BoxFuture`.
+pub struct SpawnedSubsystem<E>
+	where
+		E: std::error::Error
+			+ Send
+			+ Sync
+			+ 'static
+			+ From<self::OverseerError>,
+{
+	/// Name of the subsystem being spawned.
+	pub name: &'static str,
+	/// The task of the subsystem being spawned.
+	pub future: BoxFuture<'static, Result<(), E>>,
+}
+
+/// An error type that describes faults that may happen.
+///
+/// These are:
+///   * Channels being closed
+///   * Subsystems dying when they are not expected to
+///   * Subsystems not dying when they are told to die
+///   * etc.
+#[derive(thiserror::Error, Debug)]
+#[allow(missing_docs)]
+pub enum OverseerError {
+	#[error(transparent)]
+	NotifyCancellation(#[from] oneshot::Canceled),
+
+	#[error(transparent)]
+	QueueError(#[from] mpsc::SendError),
+
+	#[error("Failed to spawn task {0}")]
+	TaskSpawn(&'static str),
+
+	#[error(transparent)]
+	Infallible(#[from] std::convert::Infallible),
+
+	#[error("Failed to {0}")]
+	Context(String),
+
+	#[error("Subsystem stalled: {0}")]
+	SubsystemStalled(&'static str),
+
+	/// Per origin (or subsystem) annotations to wrap an error.
+	#[error("Error originated in {origin}")]
+	FromOrigin {
+		/// An additional annotation tag for the origin of `source`.
+		origin: &'static str,
+		/// The wrapped error. Marked as source for tracking the error chain.
+		#[source] source: Box<dyn 'static + std::error::Error + Send + Sync>
+	},
+}
+
+/// Alias for a result with error type `OverseerError`.
+pub type OverseerResult<T> = std::result::Result<T, self::OverseerError>;
+
+/// Collection of meters related to a subsystem.
+#[derive(Clone)]
+pub struct SubsystemMeters {
+	#[allow(missing_docs)]
+	pub bounded: metered::Meter,
+	#[allow(missing_docs)]
+	pub unbounded: metered::Meter,
+	#[allow(missing_docs)]
+	pub signals: metered::Meter,
+}
+
+impl SubsystemMeters {
+	/// Read the values of all subsystem `Meter`s.
+	pub fn read(&self) -> SubsystemMeterReadouts {
+		SubsystemMeterReadouts {
+			bounded: self.bounded.read(),
+			unbounded: self.unbounded.read(),
+			signals: self.signals.read(),
+		}
+	}
+}
+
+
+/// Set of readouts of the `Meter`s of a subsystem.
+pub struct SubsystemMeterReadouts {
+	#[allow(missing_docs)]
+	pub bounded: metered::Readout,
+	#[allow(missing_docs)]
+	pub unbounded: metered::Readout,
+	#[allow(missing_docs)]
+	pub signals: metered::Readout,
+}
+
+/// A running instance of some [`Subsystem`].
+///
+/// [`Subsystem`]: trait.Subsystem.html
+///
+/// `M` here is the inner message type, and _not_ the generated `enum AllMessages`.
+pub struct SubsystemInstance<Message, Signal> {
+	/// Send sink for `Signal`s to be sent to a subsystem.
+	pub tx_signal: crate::metered::MeteredSender<Signal>,
+	/// Send sink for `Message`s to be sent to a subsystem.
+	pub tx_bounded: crate::metered::MeteredSender<MessagePacket<Message>>,
+	/// All meters of the particular subsystem instance.
+	pub meters: SubsystemMeters,
+	/// The number of signals already received.
+	/// Required to assure messages and signals
+	/// are processed correctly.
+	pub signals_received: usize,
+	/// Name of the subsystem instance.
+	pub name: &'static str,
+}
+
+/// A message type that a subsystem receives from an overseer.
+/// It wraps signals from an overseer and messages that are circulating
+/// between subsystems.
+///
+/// It is generic over the message type `M` that a particular `Subsystem` may use.
+#[derive(Debug)]
+pub enum FromOverseer<Message, Signal> {
+	/// Signal from the `Overseer`.
+	Signal(Signal),
+
+	/// Some other `Subsystem`'s message.
+	Communication {
+		/// Contained message
+		msg: Message,
+	},
+}
+
+impl<Signal, Message> From<Signal> for FromOverseer<Message, Signal> {
+	fn from(signal: Signal) -> Self {
+		Self::Signal(signal)
+	}
+}
+
+/// A context type that is given to the [`Subsystem`] upon spawning.
+/// It can be used by [`Subsystem`] to communicate with other [`Subsystem`]s
+/// or spawn jobs.
+///
+/// [`Overseer`]: struct.Overseer.html
+/// [`SubsystemJob`]: trait.SubsystemJob.html
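+///
+/// A rough sketch of how a subsystem task might drive such a context; `handle_signal` and
+/// `handle_message` are hypothetical helpers, not part of this trait:
+///
+/// ```rust,ignore
+/// loop {
+///     match ctx.recv().await? {
+///         FromOverseer::Signal(signal) => handle_signal(signal),
+///         FromOverseer::Communication { msg } => handle_message(ctx.sender(), msg).await,
+///     }
+/// }
+/// ```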
+#[async_trait::async_trait]
+pub trait SubsystemContext: Send + 'static {
+	/// The message type of this context. Subsystems launched with this context will expect
+	/// to receive messages of this type. Commonly this is the wrapping enum called
+	/// `AllMessages`.
+	type Message: std::fmt::Debug + Send + 'static;
+	/// And the same for signals.
+	type Signal: std::fmt::Debug + Send + 'static;
+	/// The overarching all messages enum.
+	/// In some cases can be identical to `Self::Message`.
+	type AllMessages: From<Self::Message> + Send + 'static;
+	/// The sender type, as provided by `sender()`.
+	type Sender: SubsystemSender<Self::AllMessages> + Send + 'static;
+	/// The error type.
+	type Error: ::std::error::Error + ::std::convert::From< OverseerError > + Sync + Send + 'static;
+
+	/// Try to asynchronously receive a message.
+	///
+	/// This has to be used with caution, if you loop over this without
+	/// using `pending!()` macro you will end up with a busy loop!
+	async fn try_recv(&mut self) -> Result<Option<FromOverseer<Self::Message, Self::Signal>>, ()>;
+
+	/// Receive a message.
+	async fn recv(&mut self) -> Result<FromOverseer<Self::Message, Self::Signal>, Self::Error>;
+
+	/// Spawn a child task on the executor.
+	fn spawn(
+		&mut self,
+		name: &'static str,
+		s: ::std::pin::Pin<Box<dyn crate::Future<Output = ()> + Send>>
+	) -> Result<(), Self::Error>;
+
+	/// Spawn a blocking child task on the executor's dedicated thread pool.
+	fn spawn_blocking(
+		&mut self,
+		name: &'static str,
+		s: ::std::pin::Pin<Box<dyn crate::Future<Output = ()> + Send>>,
+	) -> Result<(), Self::Error>;
+
+	/// Send a direct message to some other `Subsystem`, routed based on message type.
+	async fn send_message<X>(&mut self, msg: X)
+		where
+			Self::AllMessages: From<X>,
+			X: Send,
+	{
+		self.sender().send_message(<Self::AllMessages>::from(msg)).await
+	}
+
+	/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
+	async fn send_messages<X, T>(&mut self, msgs: T)
+		where
+			T: IntoIterator<Item = X> + Send,
+			T::IntoIter: Send,
+			Self::AllMessages: From<X>,
+			X: Send,
+	{
+		self.sender().send_messages(msgs.into_iter().map(|x| <Self::AllMessages>::from(x))).await
+	}
+
+	/// Send a message using the unbounded connection.
+	fn send_unbounded_message<X>(&mut self, msg: X)
+	where
+		Self::AllMessages: From<X>,
+		X: Send,
+	{
+		self.sender().send_unbounded_message(Self::AllMessages::from(msg))
+	}
+
+	/// Obtain the sender.
+	fn sender(&mut self) -> &mut Self::Sender;
+}
+
+/// A trait that describes the [`Subsystem`]s that can run on the [`Overseer`].
+///
+/// It is generic over the message type circulating in the system.
+/// The idea is that we want some type containing persistent state that
+/// can spawn actually running subsystems when asked.
+///
+/// [`Overseer`]: struct.Overseer.html
+/// [`Subsystem`]: trait.Subsystem.html
+pub trait Subsystem<Ctx, E>
+where
+	Ctx: SubsystemContext,
+	E: std::error::Error + Send + Sync + 'static + From<self::OverseerError>,
+{
+	/// Start this `Subsystem` and return `SpawnedSubsystem`.
+	fn start(self, ctx: Ctx) -> SpawnedSubsystem < E >;
+}
+
+
+/// Sender end of a channel to interface with a subsystem.
+#[async_trait::async_trait]
+pub trait SubsystemSender<Message>: Send + Clone + 'static {
+	/// Send a direct message to some other `Subsystem`, routed based on message type.
+	async fn send_message(&mut self, msg: Message);
+
+	/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
+	async fn send_messages<T>(&mut self, msgs: T)
+		where T: IntoIterator<Item = Message> + Send, T::IntoIter: Send;
+
+	/// Send a message onto the unbounded queue of some other `Subsystem`, routed based on message
+	/// type.
+	///
+	/// This function should be used only when there is some other bounding factor on the messages
+	/// sent with it. Otherwise, it risks a memory leak.
+	fn send_unbounded_message(&mut self, msg: Message);
+}
+
+/// A future that wraps another future with a `Delay` allowing for time-limited futures.
+#[pin_project::pin_project]
+pub struct Timeout<F: Future> {
+	#[pin]
+	future: F,
+	#[pin]
+	delay: Delay,
+}
+
+/// Extends `Future` to allow time-limited futures.
+pub trait TimeoutExt: Future {
+	/// Adds a timeout of `duration` to the given `Future`.
+	/// Returns a new `Future`.
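+	///
+	/// A sketch of the intended use; the wrapped future resolves to `None` if the inner
+	/// future did not complete within `duration` (`some_request` is illustrative only):
+	///
+	/// ```rust,ignore
+	/// if some_request.timeout(Duration::from_secs(1)).await.is_none() {
+	///     // the request timed out
+	/// }
+	/// ```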
+	fn timeout(self, duration: Duration) -> Timeout<Self>
+	where
+		Self: Sized,
+	{
+		Timeout {
+			future: self,
+			delay: Delay::new(duration),
+		}
+	}
+}
+
+impl<F> TimeoutExt for F where F: Future {}
+
+impl<F> Future for Timeout<F> where F: Future {
+	type Output = Option<F::Output>;
+
+	fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {
+		let this = self.project();
+
+		if this.delay.poll(ctx).is_ready() {
+			return Poll::Ready(None);
+		}
+
+		if let Poll::Ready(output) = this.future.poll(ctx) {
+			return Poll::Ready(Some(output));
+		}
+
+		Poll::Pending
+	}
+}
diff --git a/polkadot/node/overseer/overseer-gen/src/tests.rs b/polkadot/node/overseer/overseer-gen/src/tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..1dc1d2e86d59ea3e879b73d6e831a8908e0b685c
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/src/tests.rs
@@ -0,0 +1,10 @@
+
+// The generated code requires quite a bit of surrounding code to work.
+// Please refer to [the examples](examples/dummy.rs) and
+// [the minimal usage example](../examples/minimal-example.rs).
+
+#[test]
+fn ui_compile_fail() {
+	let t = trybuild::TestCases::new();
+	t.compile_fail("tests/ui/err-*.rs");
+}
diff --git a/polkadot/node/overseer/overseer-gen/tests/ui/err-01-duplicate-consumer.rs b/polkadot/node/overseer/overseer-gen/tests/ui/err-01-duplicate-consumer.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b81f10a7f0fa99bc2725e0e471d995a417e660ea
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/tests/ui/err-01-duplicate-consumer.rs
@@ -0,0 +1,38 @@
+#![allow(dead_code)]
+
+use polkadot_overseer_gen::*;
+
+#[derive(Default)]
+struct AwesomeSubSys;
+
+#[derive(Default)]
+struct AwesomeSubSys2;
+
+#[derive(Clone, Debug)]
+struct SigSigSig;
+
+struct Event;
+
+#[derive(Clone)]
+struct MsgStrukt(u8);
+
+#[overlord(signal=SigSigSig, event=Event, gen=AllMessages, error=OverseerError)]
+struct Overseer {
+	#[subsystem(MsgStrukt)]
+	sub0: AwesomeSubSys,
+
+	#[subsystem(MsgStrukt)]
+	sub1: AwesomeSubSys2,
+}
+
+#[derive(Debug, Clone)]
+struct DummySpawner;
+
+struct DummyCtx;
+
+fn main() {
+	let overseer = Overseer::<_,_>::builder()
+		.sub0(AwesomeSubSys::default())
+		.spawner(DummySpawner)
+		.build(|| -> DummyCtx { DummyCtx } );
+}
diff --git a/polkadot/node/overseer/overseer-gen/tests/ui/err-01-duplicate-consumer.stderr b/polkadot/node/overseer/overseer-gen/tests/ui/err-01-duplicate-consumer.stderr
new file mode 100644
index 0000000000000000000000000000000000000000..edb0ef4369511cb90952e8b7103a1cf1264878b4
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/tests/ui/err-01-duplicate-consumer.stderr
@@ -0,0 +1,21 @@
+error[E0119]: conflicting implementations of trait `std::convert::From<MsgStrukt>` for type `AllMessages`
+  --> $DIR/err-01-duplicate-consumer.rs:19:1
+   |
+19 | #[overlord(signal=SigSigSig, event=Event, gen=AllMessages, error=OverseerError)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   | |
+   | first implementation here
+   | conflicting implementation for `AllMessages`
+   |
+   = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info)
+
+error[E0119]: conflicting implementations of trait `polkadot_overseer_gen::SubsystemSender<MsgStrukt>` for type `OverseerSubsystemSender`
+  --> $DIR/err-01-duplicate-consumer.rs:19:1
+   |
+19 | #[overlord(signal=SigSigSig, event=Event, gen=AllMessages, error=OverseerError)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   | |
+   | first implementation here
+   | conflicting implementation for `OverseerSubsystemSender`
+   |
+   = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/polkadot/node/overseer/overseer-gen/tests/ui/err-02-enum.rs b/polkadot/node/overseer/overseer-gen/tests/ui/err-02-enum.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c7e491bfba9a779bd95c754cfea81c9f256684b5
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/tests/ui/err-02-enum.rs
@@ -0,0 +1,32 @@
+#![allow(dead_code)]
+
+use polkadot_overseer_gen::*;
+
+#[derive(Default)]
+struct AwesomeSubSys;
+
+struct SigSigSig;
+
+struct Event;
+
+#[derive(Clone, Debug)]
+struct MsgStrukt(u8);
+
+#[overlord(signal=SigSigSig, event=Event, gen=AllMessages, error=OverseerError)]
+enum Overseer {
+	#[subsystem(MsgStrukt)]
+	Sub0(AwesomeSubSys),
+}
+
+#[derive(Debug, Clone)]
+struct DummySpawner;
+
+struct DummyCtx;
+
+fn main() {
+	let overseer = Overseer::<_,_>::builder()
+		.sub0(AwesomeSubSys::default())
+		.i_like_pie(std::f64::consts::PI)
+		.spawner(DummySpawner)
+		.build(|| -> DummyCtx { DummyCtx } );
+}
diff --git a/polkadot/node/overseer/overseer-gen/tests/ui/err-02-enum.stderr b/polkadot/node/overseer/overseer-gen/tests/ui/err-02-enum.stderr
new file mode 100644
index 0000000000000000000000000000000000000000..7ed414a6ecb3b55ac12769d8077451d8b71b7480
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/tests/ui/err-02-enum.stderr
@@ -0,0 +1,11 @@
+error: expected `struct`
+  --> $DIR/err-02-enum.rs:16:1
+   |
+16 | enum Overseer {
+   | ^^^^
+
+error[E0433]: failed to resolve: use of undeclared type `Overseer`
+  --> $DIR/err-02-enum.rs:27:17
+   |
+27 |     let overseer = Overseer::<_,_>::builder()
+   |                    ^^^^^^^^ use of undeclared type `Overseer`
diff --git a/polkadot/node/overseer/overseer-gen/tests/ui/err-03-subsys-twice.rs b/polkadot/node/overseer/overseer-gen/tests/ui/err-03-subsys-twice.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9a7ad951c8b70ade90349712ad2aed05c1a7fedc
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/tests/ui/err-03-subsys-twice.rs
@@ -0,0 +1,39 @@
+#![allow(dead_code)]
+
+use polkadot_overseer_gen::*;
+
+#[derive(Default)]
+struct AwesomeSubSys;
+
+#[derive(Clone, Debug)]
+struct SigSigSig;
+
+struct Event;
+
+#[derive(Clone, Debug)]
+struct MsgStrukt(u8);
+
+#[derive(Clone, Debug)]
+struct MsgStrukt2(f64);
+
+#[overlord(signal=SigSigSig, event=Event, gen=AllMessages, error=OverseerError)]
+struct Overseer {
+	#[subsystem(MsgStrukt)]
+	sub0: AwesomeSubSys,
+
+	#[subsystem(MsgStrukt2)]
+	sub1: AwesomeSubSys,
+}
+
+#[derive(Debug, Clone)]
+struct DummySpawner;
+
+struct DummyCtx;
+
+fn main() {
+	let overseer = Overseer::<_,_>::builder()
+		.sub0(AwesomeSubSys::default())
+		.i_like_pie(std::f64::consts::PI)
+		.spawner(DummySpawner)
+		.build(|| -> DummyCtx { DummyCtx } );
+}
diff --git a/polkadot/node/overseer/overseer-gen/tests/ui/err-03-subsys-twice.stderr b/polkadot/node/overseer/overseer-gen/tests/ui/err-03-subsys-twice.stderr
new file mode 100644
index 0000000000000000000000000000000000000000..cba46366daed779eab9139b59eb6a9b0cbaf1a8d
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/tests/ui/err-03-subsys-twice.stderr
@@ -0,0 +1,17 @@
+error: Duplicate subsystem names `AwesomeSubSys`
+  --> $DIR/err-03-subsys-twice.rs:25:8
+   |
+25 |     sub1: AwesomeSubSys,
+   |           ^^^^^^^^^^^^^
+
+error: previously defined here.
+  --> $DIR/err-03-subsys-twice.rs:22:8
+   |
+22 |     sub0: AwesomeSubSys,
+   |           ^^^^^^^^^^^^^
+
+error[E0433]: failed to resolve: use of undeclared type `Overseer`
+  --> $DIR/err-03-subsys-twice.rs:34:17
+   |
+34 |     let overseer = Overseer::<_,_>::builder()
+   |                    ^^^^^^^^ use of undeclared type `Overseer`
diff --git a/polkadot/node/overseer/overseer-gen/tests/ui/err-04-missing-error.rs b/polkadot/node/overseer/overseer-gen/tests/ui/err-04-missing-error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3b6966f1da791a224872b52e7bc848e6957d7b09
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/tests/ui/err-04-missing-error.rs
@@ -0,0 +1,36 @@
+#![allow(dead_code)]
+
+use polkadot_overseer_gen::*;
+
+#[derive(Default)]
+struct AwesomeSubSys;
+
+#[derive(Clone, Debug)]
+struct SigSigSig;
+
+struct Event;
+
+#[derive(Clone)]
+struct MsgStrukt(u8);
+
+#[overlord(signal=SigSigSig, event=Event, gen=AllMessages)]
+struct Overseer {
+	#[subsystem(no_dispatch, MsgStrukt)]
+	sub0: AwesomeSubSys,
+
+	i_like_pie: f64,
+}
+
+#[derive(Debug, Clone)]
+struct DummySpawner;
+
+struct DummyCtx;
+
+fn main() {
+	let _ = Overseer::builder()
+		.sub0(AwesomeSubSys::default())
+		.i_like_pie(std::f64::consts::PI)
+		.spawner(DummySpawner)
+		.build()
+		.unwrap();
+}
diff --git a/polkadot/node/overseer/overseer-gen/tests/ui/err-04-missing-error.stderr b/polkadot/node/overseer/overseer-gen/tests/ui/err-04-missing-error.stderr
new file mode 100644
index 0000000000000000000000000000000000000000..39e6a4a8fdc01b87d311470b9e771c3892b463e5
--- /dev/null
+++ b/polkadot/node/overseer/overseer-gen/tests/ui/err-04-missing-error.stderr
@@ -0,0 +1,13 @@
+error: Must declare the overseer error type via `error=..`.
+  --> $DIR/err-04-missing-error.rs:16:1
+   |
+16 | #[overlord(signal=SigSigSig, event=Event, gen=AllMessages)]
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |
+   = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info)
+
+error[E0433]: failed to resolve: use of undeclared type `Overseer`
+  --> $DIR/err-04-missing-error.rs:30:10
+   |
+30 |     let _ = Overseer::builder()
+   |             ^^^^^^^^ use of undeclared type `Overseer`
diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs
index cb28797ab3e284672b5efeab9ba48bee23c82182..7867ed309aecbe6983682967eac608c1bdf53a5c 100644
--- a/polkadot/node/overseer/src/lib.rs
+++ b/polkadot/node/overseer/src/lib.rs
@@ -22,7 +22,7 @@
 //! check out that guide, documentation in this crate will be mostly discussing
 //! technical stuff.
 //!
-//! An `Overseer` is something that allows spawning/stopping and overseing
+//! An `Overseer` is something that allows spawning/stopping and overseeing
 //! asynchronous tasks as well as establishing a well-defined and easy to use
 //! protocol that the tasks can use to communicate with each other. It is desired
 //! that this protocol is the only way tasks communicate with each other, however
@@ -61,69 +61,85 @@
 
 use std::fmt::{self, Debug};
 use std::pin::Pin;
-use std::sync::{atomic::{self, AtomicUsize}, Arc};
-use std::task::Poll;
+use std::sync::Arc;
 use std::time::Duration;
 use std::collections::{hash_map, HashMap};
+use std::iter::FromIterator;
 
 use futures::channel::oneshot;
 use futures::{
-	poll, select,
+	select,
 	future::BoxFuture,
-	stream::{self, FuturesUnordered, Fuse},
 	Future, FutureExt, StreamExt,
 };
-use futures_timer::Delay;
 use lru::LruCache;
 
 use polkadot_primitives::v1::{Block, BlockId,BlockNumber, Hash, ParachainHost};
 use client::{BlockImportNotification, BlockchainEvents, FinalityNotification};
 use sp_api::{ApiExt, ProvideRuntimeApi};
 
-use polkadot_subsystem::messages::{
+use polkadot_node_network_protocol::{
+	v1 as protocol_v1,
+};
+use polkadot_node_subsystem_types::messages::{
 	CandidateValidationMessage, CandidateBackingMessage,
 	ChainApiMessage, StatementDistributionMessage,
 	AvailabilityDistributionMessage, BitfieldSigningMessage, BitfieldDistributionMessage,
 	ProvisionerMessage, RuntimeApiMessage,
-	AvailabilityStoreMessage, NetworkBridgeMessage, AllMessages, CollationGenerationMessage,
+	AvailabilityStoreMessage, NetworkBridgeMessage, CollationGenerationMessage,
 	CollatorProtocolMessage, AvailabilityRecoveryMessage, ApprovalDistributionMessage,
 	ApprovalVotingMessage, GossipSupportMessage,
+	NetworkBridgeEvent,
+	DisputeParticipationMessage, DisputeCoordinatorMessage, ChainSelectionMessage,
 };
-pub use polkadot_subsystem::{
-	Subsystem, SubsystemContext, SubsystemSender, OverseerSignal, FromOverseer, SubsystemError,
-	SubsystemResult, SpawnedSubsystem, ActiveLeavesUpdate, ActivatedLeaf, DummySubsystem, jaeger,
-	LeafStatus,
+pub use polkadot_node_subsystem_types::{
+	OverseerSignal,
+	errors::{SubsystemResult, SubsystemError,},
+	ActiveLeavesUpdate, ActivatedLeaf, LeafStatus,
+	jaeger,
 };
-use polkadot_node_subsystem_util::{TimeoutExt, metrics::{self, prometheus}, metered, Metronome};
-use polkadot_node_primitives::SpawnNamed;
-use polkadot_procmacro_overseer_subsystems_gen::AllSubsystemsGen;
-
-#[cfg(test)]
-mod tests;
 
-// A capacity of bounded channels inside the overseer.
-const CHANNEL_CAPACITY: usize = 1024;
-// The capacity of signal channels to subsystems.
-const SIGNAL_CHANNEL_CAPACITY: usize = 64;
+// TODO legacy, to be deleted, left for easier integration
+// TODO https://github.com/paritytech/polkadot/issues/3427
+mod subsystems;
+pub use self::subsystems::AllSubsystems;
 
-// A graceful `Overseer` teardown time delay.
-const STOP_DELAY: u64 = 1;
-// Target for logs.
-const LOG_TARGET: &'static str = "parachain::overseer";
+mod metrics;
+use self::metrics::Metrics;
 
-trait MapSubsystem<T> {
-	type Output;
+use polkadot_node_metrics::{
+	metrics::{
+		prometheus,
+		Metrics as MetricsTrait
+	},
+	Metronome,
+};
+pub use polkadot_overseer_gen::{
+	TimeoutExt,
+	SpawnNamed,
+	Subsystem,
+	SubsystemMeterReadouts,
+	SubsystemMeters,
+	SubsystemIncomingMessages,
+	SubsystemInstance,
+	SubsystemSender,
+	SubsystemContext,
+	overlord,
+	MessagePacket,
+	SignalsReceived,
+	FromOverseer,
+	ToOverseer,
+	MapSubsystem,
+};
+pub use polkadot_overseer_gen as gen;
 
-	fn map_subsystem(&self, sub: T) -> Self::Output;
-}
+/// Store 2 days worth of blocks, not accounting for forks,
+/// in the LRU cache. Assumes a 6-second block time.
+const KNOWN_LEAVES_CACHE_SIZE: usize = 2 * 24 * 3600 / 6;
 
-impl<F, T, U> MapSubsystem<T> for F where F: Fn(T) -> U {
-	type Output = U;
+#[cfg(test)]
+mod tests;
 
-	fn map_subsystem(&self, sub: T) -> U {
-		(self)(sub)
-	}
-}
 
 /// Whether a header supports parachain consensus or not.
 pub trait HeadSupportsParachains {
@@ -141,224 +157,75 @@ impl<Client> HeadSupportsParachains for Arc<Client> where
 	}
 }
 
-/// This struct is passed as an argument to create a new instance of an [`Overseer`].
-///
-/// As any entity that satisfies the interface may act as a [`Subsystem`] this allows
-/// mocking in the test code:
+
+/// A handler used to communicate with the [`Overseer`].
 ///
-/// Each [`Subsystem`] is supposed to implement some interface that is generic over
-/// message type that is specific to this [`Subsystem`]. At the moment not all
-/// subsystems are implemented and the rest can be mocked with the [`DummySubsystem`].
-#[derive(Debug, Clone, AllSubsystemsGen)]
-pub struct AllSubsystems<
-	CV = (), CB = (), SD = (), AD = (), AR = (), BS = (), BD = (), P = (),
-	RA = (), AS = (), NB = (), CA = (), CG = (), CP = (), ApD = (), ApV = (),
-	GS = (),
-> {
-	/// A candidate validation subsystem.
-	pub candidate_validation: CV,
-	/// A candidate backing subsystem.
-	pub candidate_backing: CB,
-	/// A statement distribution subsystem.
-	pub statement_distribution: SD,
-	/// An availability distribution subsystem.
-	pub availability_distribution: AD,
-	/// An availability recovery subsystem.
-	pub availability_recovery: AR,
-	/// A bitfield signing subsystem.
-	pub bitfield_signing: BS,
-	/// A bitfield distribution subsystem.
-	pub bitfield_distribution: BD,
-	/// A provisioner subsystem.
-	pub provisioner: P,
-	/// A runtime API subsystem.
-	pub runtime_api: RA,
-	/// An availability store subsystem.
-	pub availability_store: AS,
-	/// A network bridge subsystem.
-	pub network_bridge: NB,
-	/// A Chain API subsystem.
-	pub chain_api: CA,
-	/// A Collation Generation subsystem.
-	pub collation_generation: CG,
-	/// A Collator Protocol subsystem.
-	pub collator_protocol: CP,
-	/// An Approval Distribution subsystem.
-	pub approval_distribution: ApD,
-	/// An Approval Voting subsystem.
-	pub approval_voting: ApV,
-	/// A Connection Request Issuer subsystem.
-	pub gossip_support: GS,
-}
+/// [`Overseer`]: struct.Overseer.html
+#[derive(Clone)]
+pub struct Handle(pub OverseerHandle);
 
-impl<CV, CB, SD, AD, AR, BS, BD, P, RA, AS, NB, CA, CG, CP, ApD, ApV, GS>
-	AllSubsystems<CV, CB, SD, AD, AR, BS, BD, P, RA, AS, NB, CA, CG, CP, ApD, ApV, GS>
-{
-	/// Create a new instance of [`AllSubsystems`].
-	///
-	/// Each subsystem is set to [`DummySystem`].
-	///
-	///# Note
-	///
-	/// Because of a bug in rustc it is required that when calling this function,
-	/// you provide a "random" type for the first generic parameter:
+impl Handle {
+	/// Inform the `Overseer` that some block was imported.
+	pub async fn block_imported(&mut self, block: BlockInfo) {
+		self.send_and_log_error(Event::BlockImported(block)).await
+	}
+
+	/// Send some message to one of the `Subsystem`s.
+	pub async fn send_msg(&mut self, msg: impl Into<AllMessages>, origin: &'static str) {
+		self.send_and_log_error(Event::MsgToSubsystem { msg: msg.into(), origin }).await
+	}
+
+	/// Send a message not providing an origin.
+	#[inline(always)]
+	pub async fn send_msg_anon(&mut self, msg: impl Into<AllMessages>) {
+		self.send_msg(msg, "").await
+	}
+
+	/// Inform the `Overseer` that some block was finalized.
+	pub async fn block_finalized(&mut self, block: BlockInfo) {
+		self.send_and_log_error(Event::BlockFinalized(block)).await
+	}
+
+	/// Wait for a block with the given hash to be in the active-leaves set.
 	///
-	/// ```
-	/// polkadot_overseer::AllSubsystems::<()>::dummy();
-	/// ```
-	pub fn dummy() -> AllSubsystems<
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-		DummySubsystem,
-	> {
-		AllSubsystems {
-			candidate_validation: DummySubsystem,
-			candidate_backing: DummySubsystem,
-			statement_distribution: DummySubsystem,
-			availability_distribution: DummySubsystem,
-			availability_recovery: DummySubsystem,
-			bitfield_signing: DummySubsystem,
-			bitfield_distribution: DummySubsystem,
-			provisioner: DummySubsystem,
-			runtime_api: DummySubsystem,
-			availability_store: DummySubsystem,
-			network_bridge: DummySubsystem,
-			chain_api: DummySubsystem,
-			collation_generation: DummySubsystem,
-			collator_protocol: DummySubsystem,
-			approval_distribution: DummySubsystem,
-			approval_voting: DummySubsystem,
-			gossip_support: DummySubsystem,
-		}
+	/// The response channel responds if the hash was activated and is closed if the hash was deactivated.
+	/// Note that due to the fact that the overseer doesn't store the whole active-leaves set, only deltas,
+	/// the response channel may never return if the hash was deactivated before this call.
+	/// In this case, it's the caller's responsibility to ensure a timeout is set.
+	pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender<SubsystemResult<()>>) {
+		self.send_and_log_error(Event::ExternalRequest(ExternalRequest::WaitForActivation {
+				hash,
+				response_channel
+		})).await;
 	}
 
-	fn as_ref(&self) -> AllSubsystems<&'_ CV, &'_ CB, &'_ SD, &'_ AD, &'_ AR, &'_ BS, &'_ BD, &'_ P, &'_ RA, &'_ AS, &'_ NB, &'_ CA, &'_ CG, &'_ CP, &'_ ApD, &'_ ApV, &'_ GS> {
-		AllSubsystems {
-			candidate_validation: &self.candidate_validation,
-			candidate_backing: &self.candidate_backing,
-			statement_distribution: &self.statement_distribution,
-			availability_distribution: &self.availability_distribution,
-			availability_recovery: &self.availability_recovery,
-			bitfield_signing: &self.bitfield_signing,
-			bitfield_distribution: &self.bitfield_distribution,
-			provisioner: &self.provisioner,
-			runtime_api: &self.runtime_api,
-			availability_store: &self.availability_store,
-			network_bridge: &self.network_bridge,
-			chain_api: &self.chain_api,
-			collation_generation: &self.collation_generation,
-			collator_protocol: &self.collator_protocol,
-			approval_distribution: &self.approval_distribution,
-			approval_voting: &self.approval_voting,
-			gossip_support: &self.gossip_support,
-		}
+	/// Tell `Overseer` to shutdown.
+	pub async fn stop(&mut self) {
+		self.send_and_log_error(Event::Stop).await;
 	}
 
-	fn map_subsystems<M>(self, m: M)
-		-> AllSubsystems<
-			<M as MapSubsystem<CV>>::Output,
-			<M as MapSubsystem<CB>>::Output,
-			<M as MapSubsystem<SD>>::Output,
-			<M as MapSubsystem<AD>>::Output,
-			<M as MapSubsystem<AR>>::Output,
-			<M as MapSubsystem<BS>>::Output,
-			<M as MapSubsystem<BD>>::Output,
-			<M as MapSubsystem<P>>::Output,
-			<M as MapSubsystem<RA>>::Output,
-			<M as MapSubsystem<AS>>::Output,
-			<M as MapSubsystem<NB>>::Output,
-			<M as MapSubsystem<CA>>::Output,
-			<M as MapSubsystem<CG>>::Output,
-			<M as MapSubsystem<CP>>::Output,
-			<M as MapSubsystem<ApD>>::Output,
-			<M as MapSubsystem<ApV>>::Output,
-			<M as MapSubsystem<GS>>::Output,
-		>
-	where
-		M: MapSubsystem<CV>,
-		M: MapSubsystem<CB>,
-		M: MapSubsystem<SD>,
-		M: MapSubsystem<AD>,
-		M: MapSubsystem<AR>,
-		M: MapSubsystem<BS>,
-		M: MapSubsystem<BD>,
-		M: MapSubsystem<P>,
-		M: MapSubsystem<RA>,
-		M: MapSubsystem<AS>,
-		M: MapSubsystem<NB>,
-		M: MapSubsystem<CA>,
-		M: MapSubsystem<CG>,
-		M: MapSubsystem<CP>,
-		M: MapSubsystem<ApD>,
-		M: MapSubsystem<ApV>,
-		M: MapSubsystem<GS>,
-	{
-		AllSubsystems {
-			candidate_validation: m.map_subsystem(self.candidate_validation),
-			candidate_backing: m.map_subsystem(self.candidate_backing),
-			statement_distribution: m.map_subsystem(self.statement_distribution),
-			availability_distribution: m.map_subsystem(self.availability_distribution),
-			availability_recovery: m.map_subsystem(self.availability_recovery),
-			bitfield_signing: m.map_subsystem(self.bitfield_signing),
-			bitfield_distribution: m.map_subsystem(self.bitfield_distribution),
-			provisioner: m.map_subsystem(self.provisioner),
-			runtime_api: m.map_subsystem(self.runtime_api),
-			availability_store: m.map_subsystem(self.availability_store),
-			network_bridge: m.map_subsystem(self.network_bridge),
-			chain_api: m.map_subsystem(self.chain_api),
-			collation_generation: m.map_subsystem(self.collation_generation),
-			collator_protocol: m.map_subsystem(self.collator_protocol),
-			approval_distribution: m.map_subsystem(self.approval_distribution),
-			approval_voting: m.map_subsystem(self.approval_voting),
-			gossip_support: m.map_subsystem(self.gossip_support),
+	/// Send an event to the `Overseer`, logging a message if the send fails.
+	async fn send_and_log_error(&mut self, event: Event) {
+		if self.0.send(event).await.is_err() {
+			tracing::info!(target: LOG_TARGET, "Failed to send an event to Overseer");
 		}
 	}
-}
 
-type AllSubsystemsSame<T> = AllSubsystems<
-	T, T, T, T, T,
-	T, T, T, T, T,
-	T, T, T, T, T,
-	T, T,
->;
+	/// Whether the overseer handler is connected to an overseer.
+	pub fn is_connected(&self) -> bool {
+		true
+	}
 
-/// A type of messages that are sent from [`Subsystem`] to [`Overseer`].
-///
-/// It wraps a system-wide [`AllMessages`] type that represents all possible
-/// messages in the system.
-///
-/// [`AllMessages`]: enum.AllMessages.html
-/// [`Subsystem`]: trait.Subsystem.html
-/// [`Overseer`]: struct.Overseer.html
-enum ToOverseer {
-	/// A message that wraps something the `Subsystem` is desiring to
-	/// spawn on the overseer and a `oneshot::Sender` to signal the result
-	/// of the spawn.
-	SpawnJob {
-		name: &'static str,
-		s: BoxFuture<'static, ()>,
-	},
+	/// Whether the handler is disconnected.
+	pub fn is_disconnected(&self) -> bool {
+		false
+	}
 
-	/// Same as `SpawnJob` but for blocking tasks to be executed on a
-	/// dedicated thread pool.
-	SpawnBlockingJob {
-		name: &'static str,
-		s: BoxFuture<'static, ()>,
-	},
+	/// Using this handler, connect another handler to the same
+	/// overseer, if any.
+	pub fn connect_other(&self, other: &mut Handle) {
+		*other = self.clone();
+	}
 }
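+
+// A minimal usage sketch of `Handle`; the `leaves`, `all_subsystems`,
+// `supports_parachains` and `spawner` bindings are assumed to be set up as in the
+// `Overseer::new` doc example further below:
+//
+//     let (overseer, mut handle) = Overseer::new(
+//         leaves, all_subsystems, None, supports_parachains, spawner,
+//     )?;
+//     assert!(handle.is_connected());
+//     // ... later, e.g. when the node is asked to shut down:
+//     handle.stop().await;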
 
 /// An event telling the `Overseer` on the particular block
@@ -397,116 +264,43 @@ impl From<FinalityNotification<Block>> for BlockInfo {
 	}
 }
 
-/// Some event from the outer world.
-enum Event {
+/// An event from outside the overseer scope, such
+/// as the Substrate framework or user interaction.
+pub enum Event {
+	/// A new block was imported.
 	BlockImported(BlockInfo),
+	/// A block was finalized, e.g. by GRANDPA or another finality mechanism.
 	BlockFinalized(BlockInfo),
+	/// A message addressed to a particular subsystem.
 	MsgToSubsystem {
+		/// The actual message.
 		msg: AllMessages,
+		/// The originating subsystem name.
 		origin: &'static str,
 	},
+	/// A request from the outer world.
 	ExternalRequest(ExternalRequest),
+	/// Stop the overseer, e.g. in response to a UNIX signal.
 	Stop,
 }
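+
+// Construction sketch, assuming a `BlockInfo` built from an import notification
+// (the field names match `BlockInfo` as used elsewhere in this file):
+//
+//     let imported = Event::BlockImported(BlockInfo { hash, parent_hash, number });
+//     let shutdown = Event::Stop;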
 
 /// Some request from outer world.
-enum ExternalRequest {
+pub enum ExternalRequest {
+	/// Wait for the activation of a particular hash
+	/// and be notified by means of the return channel.
 	WaitForActivation {
+		/// The relay parent whose activation to wait for.
 		hash: Hash,
+		/// Response channel to await on.
 		response_channel: oneshot::Sender<SubsystemResult<()>>,
 	},
 }
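+
+// Sketch of a wait-for-activation request, assuming a `futures` oneshot channel:
+//
+//     let (tx, rx) = oneshot::channel();
+//     let request = ExternalRequest::WaitForActivation { hash, response_channel: tx };
+//     // `rx` resolves once `hash` has entered the active-leaves set.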
 
-/// A handler used to communicate with the [`Overseer`].
-///
-/// [`Overseer`]: struct.Overseer.html
-#[derive(Clone)]
-pub struct OverseerHandler {
-	events_tx: Option<metered::MeteredSender<Event>>,
-}
-
-impl OverseerHandler {
-	/// Create a disconnected overseer handler.
-	pub fn disconnected() -> Self {
-		OverseerHandler {
-			events_tx: None,
-		}
-	}
-
-	/// Whether the overseer handler is connected to an overseer.
-	pub fn is_connected(&self) -> bool {
-		self.events_tx.is_some()
-	}
-
-	/// Whether the handler is disconnected.
-	pub fn is_disconnected(&self) -> bool {
-		self.events_tx.is_none()
-	}
-
-	/// Using this handler, connect another handler to the same
-	/// overseer, if any.
-	pub fn connect_other(&self, other: &mut OverseerHandler) {
-		other.events_tx = self.events_tx.clone();
-	}
-
-	/// Inform the `Overseer` that that some block was imported.
-	pub async fn block_imported(&mut self, block: BlockInfo) {
-		self.send_and_log_error(Event::BlockImported(block)).await
-	}
-
-	/// Send some message to one of the `Subsystem`s.
-	pub async fn send_msg(&mut self, msg: impl Into<AllMessages>, origin: &'static str) {
-		self.send_and_log_error(Event::MsgToSubsystem {
-			msg: msg.into(),
-			origin,
-		}).await
-	}
-
-	/// Same as `send_msg`, but with no origin. Used for tests.
-	pub async fn send_msg_anon(&mut self, msg: impl Into<AllMessages>) {
-		self.send_msg(msg, "").await
-	}
-
-	/// Inform the `Overseer` that some block was finalized.
-	pub async fn block_finalized(&mut self, block: BlockInfo) {
-		self.send_and_log_error(Event::BlockFinalized(block)).await
-	}
-
-	/// Wait for a block with the given hash to be in the active-leaves set.
-	///
-	/// The response channel responds if the hash was activated and is closed if the hash was deactivated.
-	/// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas,
-	/// the response channel may never return if the hash was deactivated before this call.
-	/// In this case, it's the caller's responsibility to ensure a timeout is set.
-	pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender<SubsystemResult<()>>) {
-		self.send_and_log_error(Event::ExternalRequest(ExternalRequest::WaitForActivation {
-			hash,
-			response_channel
-		})).await
-	}
-
-	/// Tell `Overseer` to shutdown.
-	pub async fn stop(&mut self) {
-		self.send_and_log_error(Event::Stop).await
-	}
-
-	async fn send_and_log_error(&mut self, event: Event) {
-		if let Some(ref mut events_tx) = self.events_tx {
-			if events_tx.send(event).await.is_err() {
-				tracing::info!(target: LOG_TARGET, "Failed to send an event to Overseer");
-			}
-		}
-	}
-}
-
 /// Glues together the [`Overseer`] and `BlockchainEvents` by forwarding
-/// import and finality notifications into the [`OverseerHandler`].
-///
-/// [`Overseer`]: struct.Overseer.html
-/// [`OverseerHandler`]: struct.OverseerHandler.html
+/// import and finality notifications into the [`OverseerHandle`].
 pub async fn forward_events<P: BlockchainEvents<Block>>(
 	client: Arc<P>,
-	mut handler: OverseerHandler,
+	mut handler: Handle,
 ) {
 	let mut finality = client.finality_notification_stream();
 	let mut imports = client.import_notification_stream();
@@ -534,802 +328,108 @@ pub async fn forward_events<P: BlockchainEvents<Block>>(
 	}
 }
 
-impl Debug for ToOverseer {
-	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-		match self {
-			ToOverseer::SpawnJob { .. } => write!(f, "OverseerMessage::Spawn(..)"),
-			ToOverseer::SpawnBlockingJob { .. } => write!(f, "OverseerMessage::SpawnBlocking(..)")
-		}
-	}
-}
-
-/// A running instance of some [`Subsystem`].
-///
-/// [`Subsystem`]: trait.Subsystem.html
-struct SubsystemInstance<M> {
-	tx_signal: metered::MeteredSender<OverseerSignal>,
-	tx_bounded: metered::MeteredSender<MessagePacket<M>>,
-	meters: SubsystemMeters,
-	signals_received: usize,
-	name: &'static str,
-}
-
-#[derive(Debug)]
-struct MessagePacket<T> {
-	signals_received: usize,
-	message: T,
-}
-
-fn make_packet<T>(signals_received: usize, message: T) -> MessagePacket<T> {
-	MessagePacket {
-		signals_received,
-		message,
-	}
-}
-
-// The channels held by every subsystem to communicate with every other subsystem.
-#[derive(Debug, Clone)]
-struct ChannelsOut {
-	candidate_validation: metered::MeteredSender<MessagePacket<CandidateValidationMessage>>,
-	candidate_backing: metered::MeteredSender<MessagePacket<CandidateBackingMessage>>,
-	statement_distribution: metered::MeteredSender<MessagePacket<StatementDistributionMessage>>,
-	availability_distribution: metered::MeteredSender<MessagePacket<AvailabilityDistributionMessage>>,
-	availability_recovery: metered::MeteredSender<MessagePacket<AvailabilityRecoveryMessage>>,
-	bitfield_signing: metered::MeteredSender<MessagePacket<BitfieldSigningMessage>>,
-	bitfield_distribution: metered::MeteredSender<MessagePacket<BitfieldDistributionMessage>>,
-	provisioner: metered::MeteredSender<MessagePacket<ProvisionerMessage>>,
-	runtime_api: metered::MeteredSender<MessagePacket<RuntimeApiMessage>>,
-	availability_store: metered::MeteredSender<MessagePacket<AvailabilityStoreMessage>>,
-	network_bridge: metered::MeteredSender<MessagePacket<NetworkBridgeMessage>>,
-	chain_api: metered::MeteredSender<MessagePacket<ChainApiMessage>>,
-	collation_generation: metered::MeteredSender<MessagePacket<CollationGenerationMessage>>,
-	collator_protocol: metered::MeteredSender<MessagePacket<CollatorProtocolMessage>>,
-	approval_distribution: metered::MeteredSender<MessagePacket<ApprovalDistributionMessage>>,
-	approval_voting: metered::MeteredSender<MessagePacket<ApprovalVotingMessage>>,
-	gossip_support: metered::MeteredSender<MessagePacket<GossipSupportMessage>>,
-
-	candidate_validation_unbounded: metered::UnboundedMeteredSender<MessagePacket<CandidateValidationMessage>>,
-	candidate_backing_unbounded: metered::UnboundedMeteredSender<MessagePacket<CandidateBackingMessage>>,
-	statement_distribution_unbounded: metered::UnboundedMeteredSender<MessagePacket<StatementDistributionMessage>>,
-	availability_distribution_unbounded: metered::UnboundedMeteredSender<MessagePacket<AvailabilityDistributionMessage>>,
-	availability_recovery_unbounded: metered::UnboundedMeteredSender<MessagePacket<AvailabilityRecoveryMessage>>,
-	bitfield_signing_unbounded: metered::UnboundedMeteredSender<MessagePacket<BitfieldSigningMessage>>,
-	bitfield_distribution_unbounded: metered::UnboundedMeteredSender<MessagePacket<BitfieldDistributionMessage>>,
-	provisioner_unbounded: metered::UnboundedMeteredSender<MessagePacket<ProvisionerMessage>>,
-	runtime_api_unbounded: metered::UnboundedMeteredSender<MessagePacket<RuntimeApiMessage>>,
-	availability_store_unbounded: metered::UnboundedMeteredSender<MessagePacket<AvailabilityStoreMessage>>,
-	network_bridge_unbounded: metered::UnboundedMeteredSender<MessagePacket<NetworkBridgeMessage>>,
-	chain_api_unbounded: metered::UnboundedMeteredSender<MessagePacket<ChainApiMessage>>,
-	collation_generation_unbounded: metered::UnboundedMeteredSender<MessagePacket<CollationGenerationMessage>>,
-	collator_protocol_unbounded: metered::UnboundedMeteredSender<MessagePacket<CollatorProtocolMessage>>,
-	approval_distribution_unbounded: metered::UnboundedMeteredSender<MessagePacket<ApprovalDistributionMessage>>,
-	approval_voting_unbounded: metered::UnboundedMeteredSender<MessagePacket<ApprovalVotingMessage>>,
-	gossip_support_unbounded: metered::UnboundedMeteredSender<MessagePacket<GossipSupportMessage>>,
-}
-
-impl ChannelsOut {
-	async fn send_and_log_error(
-		&mut self,
-		signals_received: usize,
-		message: AllMessages,
-	) {
-		let res = match message {
-			AllMessages::CandidateValidation(msg) => {
-				self.candidate_validation.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::CandidateBacking(msg) => {
-				self.candidate_backing.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::StatementDistribution(msg) => {
-				self.statement_distribution.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::AvailabilityDistribution(msg) => {
-				self.availability_distribution.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::AvailabilityRecovery(msg) => {
-				self.availability_recovery.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::BitfieldDistribution(msg) => {
-				self.bitfield_distribution.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::BitfieldSigning(msg) => {
-				self.bitfield_signing.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::Provisioner(msg) => {
-				self.provisioner.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::RuntimeApi(msg) => {
-				self.runtime_api.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::AvailabilityStore(msg) => {
-				self.availability_store.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::NetworkBridge(msg) => {
-				self.network_bridge.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::ChainApi(msg) => {
-				self.chain_api.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::CollationGeneration(msg) => {
-				self.collation_generation.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::CollatorProtocol(msg) => {
-				self.collator_protocol.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::ApprovalDistribution(msg) => {
-				self.approval_distribution.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::ApprovalVoting(msg) => {
-				self.approval_voting.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::GossipSupport(msg) => {
-				self.gossip_support.send(make_packet(signals_received, msg)).await
-			},
-			AllMessages::DisputeCoordinator(_) => Ok(()),
-			AllMessages::DisputeParticipation(_) => Ok(()),
-			AllMessages::ChainSelection(_) => Ok(()),
-		};
-
-		if res.is_err() {
-			tracing::debug!(
-				target: LOG_TARGET,
-				"Failed to send a message to another subsystem",
-			);
-		}
-	}
-
-
-	fn send_unbounded_and_log_error(
-		&self,
-		signals_received: usize,
-		message: AllMessages,
-	) {
-		let res = match message {
-			AllMessages::CandidateValidation(msg) => {
-				self.candidate_validation_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::CandidateBacking(msg) => {
-				self.candidate_backing_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::StatementDistribution(msg) => {
-				self.statement_distribution_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::AvailabilityDistribution(msg) => {
-				self.availability_distribution_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::AvailabilityRecovery(msg) => {
-				self.availability_recovery_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::BitfieldDistribution(msg) => {
-				self.bitfield_distribution_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::BitfieldSigning(msg) => {
-				self.bitfield_signing_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::Provisioner(msg) => {
-				self.provisioner_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::RuntimeApi(msg) => {
-				self.runtime_api_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::AvailabilityStore(msg) => {
-				self.availability_store_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::NetworkBridge(msg) => {
-				self.network_bridge_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::ChainApi(msg) => {
-				self.chain_api_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::CollationGeneration(msg) => {
-				self.collation_generation_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::CollatorProtocol(msg) => {
-				self.collator_protocol_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::ApprovalDistribution(msg) => {
-				self.approval_distribution_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::ApprovalVoting(msg) => {
-				self.approval_voting_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::GossipSupport(msg) => {
-				self.gossip_support_unbounded
-					.unbounded_send(make_packet(signals_received, msg))
-					.map_err(|e| e.into_send_error())
-			},
-			AllMessages::DisputeCoordinator(_) => Ok(()),
-			AllMessages::DisputeParticipation(_) => Ok(()),
-			AllMessages::ChainSelection(_) => Ok(()),
-		};
-
-		if res.is_err() {
-			tracing::debug!(
-				target: LOG_TARGET,
-				"Failed to send a message to another subsystem",
-			);
-		}
-	}
-}
-
-type SubsystemIncomingMessages<M> = stream::Select<
-	metered::MeteredReceiver<MessagePacket<M>>,
-	metered::UnboundedMeteredReceiver<MessagePacket<M>>,
->;
-
-#[derive(Debug, Default, Clone)]
-struct SignalsReceived(Arc<AtomicUsize>);
-
-impl SignalsReceived {
-	fn load(&self) -> usize {
-		self.0.load(atomic::Ordering::SeqCst)
-	}
-
-	fn inc(&self) {
-		self.0.fetch_add(1, atomic::Ordering::SeqCst);
-	}
-}
-
-/// A sender from subsystems to other subsystems.
-#[derive(Debug, Clone)]
-pub struct OverseerSubsystemSender {
-	channels: ChannelsOut,
-	signals_received: SignalsReceived,
-}
-
-#[async_trait::async_trait]
-impl SubsystemSender for OverseerSubsystemSender {
-	async fn send_message(&mut self, msg: AllMessages) {
-		let needed_signals = self.signals_received.load();
-		self.channels.send_and_log_error(needed_signals, msg).await;
-	}
-
-	async fn send_messages<T>(&mut self, msgs: T)
-		where T: IntoIterator<Item = AllMessages> + Send, T::IntoIter: Send
-	{
-		// This can definitely be optimized if necessary.
-		for msg in msgs {
-			self.send_message(msg).await;
-		}
-	}
-
-	fn send_unbounded_message(&mut self, msg: AllMessages) {
-		self.channels.send_unbounded_and_log_error(self.signals_received.load(), msg);
-	}
-}
-
-/// A context type that is given to the [`Subsystem`] upon spawning.
-/// It can be used by [`Subsystem`] to communicate with other [`Subsystem`]s
-/// or to spawn it's [`SubsystemJob`]s.
-///
-/// [`Overseer`]: struct.Overseer.html
-/// [`Subsystem`]: trait.Subsystem.html
-/// [`SubsystemJob`]: trait.SubsystemJob.html
-#[derive(Debug)]
-pub struct OverseerSubsystemContext<M>{
-	signals: metered::MeteredReceiver<OverseerSignal>,
-	messages: SubsystemIncomingMessages<M>,
-	to_subsystems: OverseerSubsystemSender,
-	to_overseer: metered::UnboundedMeteredSender<ToOverseer>,
-	signals_received: SignalsReceived,
-	pending_incoming: Option<(usize, M)>,
-	metrics: Metrics,
-}
-
-impl<M> OverseerSubsystemContext<M> {
-	/// Create a new `OverseerSubsystemContext`.
-	fn new(
-		signals: metered::MeteredReceiver<OverseerSignal>,
-		messages: SubsystemIncomingMessages<M>,
-		to_subsystems: ChannelsOut,
-		to_overseer: metered::UnboundedMeteredSender<ToOverseer>,
-		metrics: Metrics,
-	) -> Self {
-		let signals_received = SignalsReceived::default();
-		OverseerSubsystemContext {
-			signals,
-			messages,
-			to_subsystems: OverseerSubsystemSender {
-				channels: to_subsystems,
-				signals_received: signals_received.clone(),
-			},
-			to_overseer,
-			signals_received,
-			pending_incoming: None,
-			metrics,
-		 }
-	}
-
-	/// Create a new `OverseerSubsystemContext` with no metering.
-	///
-	/// Intended for tests.
-	#[allow(unused)]
-	fn new_unmetered(
-		signals: metered::MeteredReceiver<OverseerSignal>,
-		messages: SubsystemIncomingMessages<M>,
-		to_subsystems: ChannelsOut,
-		to_overseer: metered::UnboundedMeteredSender<ToOverseer>,
-	) -> Self {
-		let metrics = Metrics::default();
-		OverseerSubsystemContext::new(signals, messages, to_subsystems, to_overseer, metrics)
-	}
-}
+/// The `Overseer` itself.
+#[overlord(
+	gen=AllMessages,
+	event=Event,
+	signal=OverseerSignal,
+	error=SubsystemError,
+	network=NetworkBridgeEvent<protocol_v1::ValidationProtocol>,
+)]
+pub struct Overseer<SupportsParachains> {
 
-#[async_trait::async_trait]
-impl<M: Send + 'static> SubsystemContext for OverseerSubsystemContext<M> {
-	type Message = M;
-	type Sender = OverseerSubsystemSender;
+	#[subsystem(no_dispatch, CandidateValidationMessage)]
+	candidate_validation: CandidateValidation,
 
-	async fn try_recv(&mut self) -> Result<Option<FromOverseer<M>>, ()> {
-		match poll!(self.recv()) {
-			Poll::Ready(msg) => Ok(Some(msg.map_err(|_| ())?)),
-			Poll::Pending => Ok(None),
-		}
-	}
+	#[subsystem(no_dispatch, CandidateBackingMessage)]
+	candidate_backing: CandidateBacking,
 
-	async fn recv(&mut self) -> SubsystemResult<FromOverseer<M>> {
-		loop {
-			// If we have a message pending an overseer signal, we only poll for signals
-			// in the meantime.
-			let signals_received = self.signals_received.load();
-			if let Some((needs_signals_received, msg)) = self.pending_incoming.take() {
-				if needs_signals_received <= signals_received {
-					return Ok(FromOverseer::Communication { msg });
-				} else {
-					self.pending_incoming = Some((needs_signals_received, msg));
-					tracing::debug!(
-						target: LOG_TARGET,
-						subsystem = std::any::type_name::<M>(),
-						diff = needs_signals_received - signals_received,
-						"waiting for a signal",
-					);
-					// wait for next signal.
-					let signal = self.signals.next().await
-						.ok_or(SubsystemError::Context(
-							"Signal channel is terminated and empty."
-							.to_owned()
-						))?;
-
-					self.signals_received.inc();
-					return Ok(FromOverseer::Signal(signal))
-				}
-			}
+	#[subsystem(StatementDistributionMessage)]
+	statement_distribution: StatementDistribution,
 
-			let mut await_message = self.messages.next();
-			let mut await_signal = self.signals.next();
-			let pending_incoming = &mut self.pending_incoming;
+	#[subsystem(no_dispatch, AvailabilityDistributionMessage)]
+	availability_distribution: AvailabilityDistribution,
 
-			// Otherwise, wait for the next signal or incoming message.
-			let from_overseer = futures::select_biased! {
-				signal = await_signal => {
-					let signal = signal
-						.ok_or(SubsystemError::Context(
-							"Signal channel is terminated and empty."
-							.to_owned()
-						))?;
-
-					FromOverseer::Signal(signal)
-				}
-				msg = await_message => {
-					let packet = msg
-						.ok_or(SubsystemError::Context(
-							"Message channel is terminated and empty."
-							.to_owned()
-						))?;
-
-					if packet.signals_received > signals_received {
-						// wait until we've received enough signals to return this message.
-						*pending_incoming = Some((packet.signals_received, packet.message));
-						continue;
-					} else {
-						// we know enough to return this message.
-						FromOverseer::Communication { msg: packet.message}
-					}
-				}
-			};
+	#[subsystem(no_dispatch, AvailabilityRecoveryMessage)]
+	availability_recovery: AvailabilityRecovery,
 
-			if let FromOverseer::Signal(_) = from_overseer {
-				self.signals_received.inc();
-			}
+	#[subsystem(blocking, no_dispatch, BitfieldSigningMessage)]
+	bitfield_signing: BitfieldSigning,
 
-			return Ok(from_overseer);
-		}
-	}
+	#[subsystem(BitfieldDistributionMessage)]
+	bitfield_distribution: BitfieldDistribution,
 
-	fn spawn(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
-		-> SubsystemResult<()>
-	{
-		self.to_overseer.unbounded_send(ToOverseer::SpawnJob {
-			name,
-			s,
-		}).map_err(|_| SubsystemError::TaskSpawn(name))
-	}
+	#[subsystem(no_dispatch, ProvisionerMessage)]
+	provisioner: Provisioner,
 
-	fn spawn_blocking(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
-		-> SubsystemResult<()>
-	{
-		self.to_overseer.unbounded_send(ToOverseer::SpawnBlockingJob {
-			name,
-			s,
-		}).map_err(|_| SubsystemError::TaskSpawn(name))
-	}
+	#[subsystem(no_dispatch, blocking, RuntimeApiMessage)]
+	runtime_api: RuntimeApi,
 
-	fn sender(&mut self) -> &mut OverseerSubsystemSender {
-		&mut self.to_subsystems
-	}
-}
+	#[subsystem(no_dispatch, blocking, AvailabilityStoreMessage)]
+	availability_store: AvailabilityStore,
 
-/// A subsystem that we oversee.
-///
-/// Ties together the [`Subsystem`] itself and it's running instance
-/// (which may be missing if the [`Subsystem`] is not running at the moment
-/// for whatever reason).
-///
-/// [`Subsystem`]: trait.Subsystem.html
-struct OverseenSubsystem<M> {
-	instance: Option<SubsystemInstance<M>>,
-}
+	#[subsystem(no_dispatch, NetworkBridgeMessage)]
+	network_bridge: NetworkBridge,
 
-impl<M> OverseenSubsystem<M> {
-	/// Send a message to the wrapped subsystem.
-	///
-	/// If the inner `instance` is `None`, nothing is happening.
-	async fn send_message(&mut self, msg: M, origin: &'static str) -> SubsystemResult<()> {
-		const MESSAGE_TIMEOUT: Duration = Duration::from_secs(10);
-
-		if let Some(ref mut instance) = self.instance {
-			match instance.tx_bounded.send(MessagePacket {
-				signals_received: instance.signals_received,
-				message: msg.into()
-			}).timeout(MESSAGE_TIMEOUT).await
-			{
-				None => {
-					tracing::error!(
-						target: LOG_TARGET,
-						%origin,
-						"Subsystem {} appears unresponsive.",
-						instance.name,
-					);
-					Err(SubsystemError::SubsystemStalled(instance.name))
-				}
-				Some(res) => res.map_err(Into::into),
-			}
-		} else {
-			Ok(())
-		}
-	}
+	#[subsystem(no_dispatch, blocking, ChainApiMessage)]
+	chain_api: ChainApi,
 
-	/// Send a signal to the wrapped subsystem.
-	///
-	/// If the inner `instance` is `None`, nothing is happening.
-	async fn send_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> {
-		const SIGNAL_TIMEOUT: Duration = Duration::from_secs(10);
+	#[subsystem(no_dispatch, CollationGenerationMessage)]
+	collation_generation: CollationGeneration,
 
-		if let Some(ref mut instance) = self.instance {
-			match instance.tx_signal.send(signal.clone()).timeout(SIGNAL_TIMEOUT).await {
-				None => {
-					tracing::error!(
-						target: LOG_TARGET,
-						?signal,
-						received = instance.signals_received,
-						"Subsystem {} appears unresponsive.",
-						instance.name,
-					);
-					Err(SubsystemError::SubsystemStalled(instance.name))
-				}
-				Some(res) => {
-					let res = res.map_err(Into::into);
-					if res.is_ok() {
-						instance.signals_received += 1;
-					}
-					res
-				}
-			}
-		} else {
-			Ok(())
-		}
-	}
-}
+	#[subsystem(no_dispatch, CollatorProtocolMessage)]
+	collator_protocol: CollatorProtocol,
 
-#[derive(Clone)]
-struct SubsystemMeters {
-	bounded: metered::Meter,
-	unbounded: metered::Meter,
-	signals: metered::Meter,
-}
+	#[subsystem(ApprovalDistributionMessage)]
+	approval_distribution: ApprovalDistribution,
 
-impl SubsystemMeters {
-	fn read(&self) -> SubsystemMeterReadouts {
-		SubsystemMeterReadouts {
-			bounded: self.bounded.read(),
-			unbounded: self.unbounded.read(),
-			signals: self.signals.read(),
-		}
-	}
-}
+	#[subsystem(no_dispatch, ApprovalVotingMessage)]
+	approval_voting: ApprovalVoting,
 
-struct SubsystemMeterReadouts {
-	bounded: metered::Readout,
-	unbounded: metered::Readout,
-	signals: metered::Readout,
-}
+	#[subsystem(no_dispatch, GossipSupportMessage)]
+	gossip_support: GossipSupport,
 
+	#[subsystem(no_dispatch, wip, DisputeCoordinatorMessage)]
+	dispute_coordinator: DisputeCoordinator,
 
-/// Store 2 days worth of blocks, not accounting for forks,
-/// in the LRU cache. Assumes a 6-second block time.
-const KNOWN_LEAVES_CACHE_SIZE: usize = 2 * 24 * 3600 / 6;
+	#[subsystem(no_dispatch, wip, DisputeParticipationMessage)]
+	dispute_participation: DisputeParticipation,
 
-/// The `Overseer` itself.
-pub struct Overseer<S, SupportsParachains> {
-	/// Handles to all subsystems.
-	subsystems: AllSubsystems<
-		OverseenSubsystem<CandidateValidationMessage>,
-		OverseenSubsystem<CandidateBackingMessage>,
-		OverseenSubsystem<StatementDistributionMessage>,
-		OverseenSubsystem<AvailabilityDistributionMessage>,
-		OverseenSubsystem<AvailabilityRecoveryMessage>,
-		OverseenSubsystem<BitfieldSigningMessage>,
-		OverseenSubsystem<BitfieldDistributionMessage>,
-		OverseenSubsystem<ProvisionerMessage>,
-		OverseenSubsystem<RuntimeApiMessage>,
-		OverseenSubsystem<AvailabilityStoreMessage>,
-		OverseenSubsystem<NetworkBridgeMessage>,
-		OverseenSubsystem<ChainApiMessage>,
-		OverseenSubsystem<CollationGenerationMessage>,
-		OverseenSubsystem<CollatorProtocolMessage>,
-		OverseenSubsystem<ApprovalDistributionMessage>,
-		OverseenSubsystem<ApprovalVotingMessage>,
-		OverseenSubsystem<GossipSupportMessage>,
-	>,
-
-	/// Spawner to spawn tasks to.
-	s: S,
-
-	/// Here we keep handles to spawned subsystems to be notified when they terminate.
-	running_subsystems: FuturesUnordered<BoxFuture<'static, SubsystemResult<()>>>,
-
-	/// Gather running subsystems' outbound streams into one.
-	to_overseer_rx: Fuse<metered::UnboundedMeteredReceiver<ToOverseer>>,
-
-	/// Events that are sent to the overseer from the outside world
-	events_rx: metered::MeteredReceiver<Event>,
+	#[subsystem(no_dispatch, wip, ChainSelectionMessage)]
+	chain_selection: ChainSelection,
 
 	/// External listeners waiting for a hash to be in the active-leave set.
-	activation_external_listeners: HashMap<Hash, Vec<oneshot::Sender<SubsystemResult<()>>>>,
+	pub activation_external_listeners: HashMap<Hash, Vec<oneshot::Sender<SubsystemResult<()>>>>,
 
 	/// Stores the [`jaeger::Span`] per active leaf.
-	span_per_active_leaf: HashMap<Hash, Arc<jaeger::Span>>,
+	pub span_per_active_leaf: HashMap<Hash, Arc<jaeger::Span>>,
 
 	/// A set of leaves that `Overseer` starts working with.
 	///
 	/// Drained at the beginning of `run` and never used again.
-	leaves: Vec<(Hash, BlockNumber)>,
+	pub leaves: Vec<(Hash, BlockNumber)>,
 
 	/// The set of the "active leaves".
-	active_leaves: HashMap<Hash, BlockNumber>,
+	pub active_leaves: HashMap<Hash, BlockNumber>,
 
 	/// An implementation for checking whether a header supports parachain consensus.
-	supports_parachains: SupportsParachains,
+	pub supports_parachains: SupportsParachains,
 
 	/// An LRU cache for keeping track of relay-chain heads that have already been seen.
-	known_leaves: LruCache<Hash, ()>,
+	pub known_leaves: LruCache<Hash, ()>,
 
 	/// Various Prometheus metrics.
-	metrics: Metrics,
-}
-
-/// Overseer Prometheus metrics.
-#[derive(Clone)]
-struct MetricsInner {
-	activated_heads_total: prometheus::Counter<prometheus::U64>,
-	deactivated_heads_total: prometheus::Counter<prometheus::U64>,
-	messages_relayed_total: prometheus::Counter<prometheus::U64>,
-	to_subsystem_bounded_sent: prometheus::GaugeVec<prometheus::U64>,
-	to_subsystem_bounded_received: prometheus::GaugeVec<prometheus::U64>,
-	to_subsystem_unbounded_sent: prometheus::GaugeVec<prometheus::U64>,
-	to_subsystem_unbounded_received: prometheus::GaugeVec<prometheus::U64>,
-	signals_sent: prometheus::GaugeVec<prometheus::U64>,
-	signals_received: prometheus::GaugeVec<prometheus::U64>,
-}
-
-#[derive(Default, Clone)]
-struct Metrics(Option<MetricsInner>);
-
-impl Metrics {
-	fn on_head_activated(&self) {
-		if let Some(metrics) = &self.0 {
-			metrics.activated_heads_total.inc();
-		}
-	}
-
-	fn on_head_deactivated(&self) {
-		if let Some(metrics) = &self.0 {
-			metrics.deactivated_heads_total.inc();
-		}
-	}
-
-	fn on_message_relayed(&self) {
-		if let Some(metrics) = &self.0 {
-			metrics.messages_relayed_total.inc();
-		}
-	}
-
-	fn channel_fill_level_snapshot(
-		&self,
-		to_subsystem: AllSubsystemsSame<(&'static str, SubsystemMeterReadouts)>,
-	) {
-		self.0.as_ref().map(|metrics| {
-			to_subsystem.map_subsystems(
-				|(name, readouts): (_, SubsystemMeterReadouts)| {
-					metrics.to_subsystem_bounded_sent.with_label_values(&[name])
-						.set(readouts.bounded.sent as u64);
-
-					metrics.to_subsystem_bounded_received.with_label_values(&[name])
-						.set(readouts.bounded.received as u64);
-
-					metrics.to_subsystem_unbounded_sent.with_label_values(&[name])
-						.set(readouts.unbounded.sent as u64);
-
-					metrics.to_subsystem_unbounded_received.with_label_values(&[name])
-						.set(readouts.unbounded.received as u64);
-
-					metrics.signals_sent.with_label_values(&[name])
-						.set(readouts.signals.sent as u64);
-
-					metrics.signals_received.with_label_values(&[name])
-						.set(readouts.signals.received as u64);
-				});
-		});
-	}
-}
-
-impl metrics::Metrics for Metrics {
-	fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
-		let metrics = MetricsInner {
-			activated_heads_total: prometheus::register(
-				prometheus::Counter::new(
-					"parachain_activated_heads_total",
-					"Number of activated heads."
-				)?,
-				registry,
-			)?,
-			deactivated_heads_total: prometheus::register(
-				prometheus::Counter::new(
-					"parachain_deactivated_heads_total",
-					"Number of deactivated heads."
-				)?,
-				registry,
-			)?,
-			messages_relayed_total: prometheus::register(
-				prometheus::Counter::new(
-					"parachain_messages_relayed_total",
-					"Number of messages relayed by Overseer."
-				)?,
-				registry,
-			)?,
-			to_subsystem_bounded_sent: prometheus::register(
-				prometheus::GaugeVec::<prometheus::U64>::new(
-					prometheus::Opts::new(
-						"parachain_subsystem_bounded_sent",
-						"Number of elements sent to subsystems' bounded queues",
-					),
-					&[
-						"subsystem_name",
-					],
-				)?,
-				registry,
-			)?,
-			to_subsystem_bounded_received: prometheus::register(
-				prometheus::GaugeVec::<prometheus::U64>::new(
-					prometheus::Opts::new(
-						"parachain_subsystem_bounded_received",
-						"Number of elements received by subsystems' bounded queues",
-					),
-					&[
-						"subsystem_name",
-					],
-				)?,
-				registry,
-			)?,
-			to_subsystem_unbounded_sent: prometheus::register(
-				prometheus::GaugeVec::<prometheus::U64>::new(
-					prometheus::Opts::new(
-						"parachain_subsystem_unbounded_sent",
-						"Number of elements sent to subsystems' unbounded queues",
-					),
-					&[
-						"subsystem_name",
-					],
-				)?,
-				registry,
-			)?,
-			to_subsystem_unbounded_received: prometheus::register(
-				prometheus::GaugeVec::<prometheus::U64>::new(
-					prometheus::Opts::new(
-						"parachain_subsystem_unbounded_received",
-						"Number of elements received by subsystems' unbounded queues",
-					),
-					&[
-						"subsystem_name",
-					],
-				)?,
-				registry,
-			)?,
-			signals_sent: prometheus::register(
-				prometheus::GaugeVec::<prometheus::U64>::new(
-					prometheus::Opts::new(
-						"parachain_overseer_signals_sent",
-						"Number of signals sent by overseer to subsystems",
-					),
-					&[
-						"subsystem_name",
-					],
-				)?,
-				registry,
-			)?,
-			signals_received: prometheus::register(
-				prometheus::GaugeVec::<prometheus::U64>::new(
-					prometheus::Opts::new(
-						"parachain_overseer_signals_received",
-						"Number of signals received by subsystems from overseer",
-					),
-					&[
-						"subsystem_name",
-					],
-				)?,
-				registry,
-			)?,
-		};
-		Ok(Metrics(Some(metrics)))
-	}
-}
-
-impl fmt::Debug for Metrics {
-	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-		f.write_str("Metrics {{...}}")
-	}
+	pub metrics: Metrics,
 }
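+
+// A condensed sketch of the builder derived from this struct by `#[overlord(..)]`;
+// `Overseer::new` below drives the full version of this call chain:
+//
+//     let (overseer, handle) = Overseer::builder()
+//         .candidate_validation(candidate_validation_subsystem)
+//         // ... one setter per subsystem and per plain field ...
+//         .spawner(spawner)
+//         .build()?;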
 
 impl<S, SupportsParachains> Overseer<S, SupportsParachains>
 where
-	S: SpawnNamed,
 	SupportsParachains: HeadSupportsParachains,
+	S: SpawnNamed,
 {
 	/// Create a new instance of the [`Overseer`] with a fixed set of [`Subsystem`]s.
 	///
-	/// This returns the overseer along with an [`OverseerHandler`] which can
+	/// This returns the overseer along with an [`OverseerHandle`] which can
 	/// be used to send messages from external parts of the codebase.
 	///
 	/// The [`OverseerHandler`] returned from this function is connected to
@@ -1367,22 +467,42 @@ where
 	/// # use std::time::Duration;
 	/// # use futures::{executor, pin_mut, select, FutureExt};
 	/// # use futures_timer::Delay;
-	/// # use polkadot_overseer::{Overseer, HeadSupportsParachains, AllSubsystems};
 	/// # use polkadot_primitives::v1::Hash;
-	/// # use polkadot_subsystem::{
-	/// #     Subsystem, DummySubsystem, SpawnedSubsystem, SubsystemContext,
-	/// #     messages::CandidateValidationMessage,
+	/// # use polkadot_overseer::{
+	/// # 	self as overseer,
+	/// # 	OverseerSignal,
+	/// # 	SubsystemSender as _,
+	/// # 	AllMessages,
+	/// # 	AllSubsystems,
+	/// # 	HeadSupportsParachains,
+	/// # 	Overseer,
+	/// # 	SubsystemError,
+	/// # 	gen::{
+	/// # 		SubsystemContext,
+	/// # 		FromOverseer,
+	/// # 		SpawnedSubsystem,
+	/// # 	},
+	/// # };
+	/// # use polkadot_node_subsystem_types::messages::{
+	/// # 	CandidateValidationMessage, CandidateBackingMessage,
+	/// # 	NetworkBridgeMessage,
 	/// # };
 	///
 	/// struct ValidationSubsystem;
 	///
-	/// impl<C> Subsystem<C> for ValidationSubsystem
-	///     where C: SubsystemContext<Message=CandidateValidationMessage>
+	/// impl<Ctx> overseer::Subsystem<Ctx, SubsystemError> for ValidationSubsystem
+	/// where
+	///     Ctx: overseer::SubsystemContext<
+	///         Message=CandidateValidationMessage,
+	///         AllMessages=AllMessages,
+	///         Signal=OverseerSignal,
+	///         Error=SubsystemError,
+	///     >,
 	/// {
 	///     fn start(
 	///         self,
-	///         mut ctx: C,
-	///     ) -> SpawnedSubsystem {
+	///         mut ctx: Ctx,
+	///     ) -> SpawnedSubsystem<SubsystemError> {
 	///         SpawnedSubsystem {
 	///             name: "validation-subsystem",
 	///             future: Box::pin(async move {
@@ -1401,7 +521,8 @@ where
 	///      fn head_supports_parachains(&self, _head: &Hash) -> bool { true }
 	/// }
 	/// let spawner = sp_core::testing::TaskExecutor::new();
-	/// let all_subsystems = AllSubsystems::<()>::dummy().replace_candidate_validation(ValidationSubsystem);
+	/// let all_subsystems = AllSubsystems::<()>::dummy()
+	///     .replace_candidate_validation(ValidationSubsystem);
 	/// let (overseer, _handler) = Overseer::new(
 	///     vec![],
 	///     all_subsystems,
@@ -1421,490 +542,116 @@ where
 	///     _ = timer => (),
 	/// }
 	/// #
-	/// # }); }
+	/// # 	});
+	/// # }
 	/// ```
 	pub fn new<CV, CB, SD, AD, AR, BS, BD, P, RA, AS, NB, CA, CG, CP, ApD, ApV, GS>(
 		leaves: impl IntoIterator<Item = BlockInfo>,
 		all_subsystems: AllSubsystems<CV, CB, SD, AD, AR, BS, BD, P, RA, AS, NB, CA, CG, CP, ApD, ApV, GS>,
 		prometheus_registry: Option<&prometheus::Registry>,
 		supports_parachains: SupportsParachains,
-		mut s: S,
-	) -> SubsystemResult<(Self, OverseerHandler)>
+		s: S,
+	) -> SubsystemResult<(Self, Handle)>
 	where
-		CV: Subsystem<OverseerSubsystemContext<CandidateValidationMessage>> + Send,
-		CB: Subsystem<OverseerSubsystemContext<CandidateBackingMessage>> + Send,
-		SD: Subsystem<OverseerSubsystemContext<StatementDistributionMessage>> + Send,
-		AD: Subsystem<OverseerSubsystemContext<AvailabilityDistributionMessage>> + Send,
-		AR: Subsystem<OverseerSubsystemContext<AvailabilityRecoveryMessage>> + Send,
-		BS: Subsystem<OverseerSubsystemContext<BitfieldSigningMessage>> + Send,
-		BD: Subsystem<OverseerSubsystemContext<BitfieldDistributionMessage>> + Send,
-		P: Subsystem<OverseerSubsystemContext<ProvisionerMessage>> + Send,
-		RA: Subsystem<OverseerSubsystemContext<RuntimeApiMessage>> + Send,
-		AS: Subsystem<OverseerSubsystemContext<AvailabilityStoreMessage>> + Send,
-		NB: Subsystem<OverseerSubsystemContext<NetworkBridgeMessage>> + Send,
-		CA: Subsystem<OverseerSubsystemContext<ChainApiMessage>> + Send,
-		CG: Subsystem<OverseerSubsystemContext<CollationGenerationMessage>> + Send,
-		CP: Subsystem<OverseerSubsystemContext<CollatorProtocolMessage>> + Send,
-		ApD: Subsystem<OverseerSubsystemContext<ApprovalDistributionMessage>> + Send,
-		ApV: Subsystem<OverseerSubsystemContext<ApprovalVotingMessage>> + Send,
-		GS: Subsystem<OverseerSubsystemContext<GossipSupportMessage>> + Send,
+		CV: Subsystem<OverseerSubsystemContext<CandidateValidationMessage>, SubsystemError> + Send,
+		CB: Subsystem<OverseerSubsystemContext<CandidateBackingMessage>, SubsystemError> + Send,
+		SD: Subsystem<OverseerSubsystemContext<StatementDistributionMessage>, SubsystemError> + Send,
+		AD: Subsystem<OverseerSubsystemContext<AvailabilityDistributionMessage>, SubsystemError> + Send,
+		AR: Subsystem<OverseerSubsystemContext<AvailabilityRecoveryMessage>, SubsystemError> + Send,
+		BS: Subsystem<OverseerSubsystemContext<BitfieldSigningMessage>, SubsystemError> + Send,
+		BD: Subsystem<OverseerSubsystemContext<BitfieldDistributionMessage>, SubsystemError> + Send,
+		P: Subsystem<OverseerSubsystemContext<ProvisionerMessage>, SubsystemError> + Send,
+		RA: Subsystem<OverseerSubsystemContext<RuntimeApiMessage>, SubsystemError> + Send,
+		AS: Subsystem<OverseerSubsystemContext<AvailabilityStoreMessage>, SubsystemError> + Send,
+		NB: Subsystem<OverseerSubsystemContext<NetworkBridgeMessage>, SubsystemError> + Send,
+		CA: Subsystem<OverseerSubsystemContext<ChainApiMessage>, SubsystemError> + Send,
+		CG: Subsystem<OverseerSubsystemContext<CollationGenerationMessage>, SubsystemError> + Send,
+		CP: Subsystem<OverseerSubsystemContext<CollatorProtocolMessage>, SubsystemError> + Send,
+		ApD: Subsystem<OverseerSubsystemContext<ApprovalDistributionMessage>, SubsystemError> + Send,
+		ApV: Subsystem<OverseerSubsystemContext<ApprovalVotingMessage>, SubsystemError> + Send,
+		GS: Subsystem<OverseerSubsystemContext<GossipSupportMessage>, SubsystemError> + Send,
+		S: SpawnNamed,
 	{
-		let (events_tx, events_rx) = metered::channel(CHANNEL_CAPACITY);
-
-		let handler = OverseerHandler {
-			events_tx: Some(events_tx.clone()),
-		};
-
-		let metrics = <Metrics as metrics::Metrics>::register(prometheus_registry)?;
-
-		let (to_overseer_tx, to_overseer_rx) = metered::unbounded();
-
-		let mut running_subsystems = FuturesUnordered::new();
-
-		let (candidate_validation_bounded_tx, candidate_validation_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (candidate_backing_bounded_tx, candidate_backing_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (statement_distribution_bounded_tx, statement_distribution_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (availability_distribution_bounded_tx, availability_distribution_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (availability_recovery_bounded_tx, availability_recovery_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (bitfield_signing_bounded_tx, bitfield_signing_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (bitfield_distribution_bounded_tx, bitfield_distribution_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (provisioner_bounded_tx, provisioner_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (runtime_api_bounded_tx, runtime_api_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (availability_store_bounded_tx, availability_store_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (network_bridge_bounded_tx, network_bridge_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (chain_api_bounded_tx, chain_api_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (collator_protocol_bounded_tx, collator_protocol_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (collation_generation_bounded_tx, collation_generation_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (approval_distribution_bounded_tx, approval_distribution_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (approval_voting_bounded_tx, approval_voting_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-		let (gossip_support_bounded_tx, gossip_support_bounded_rx)
-			= metered::channel(CHANNEL_CAPACITY);
-
-		let (candidate_validation_unbounded_tx, candidate_validation_unbounded_rx)
-			= metered::unbounded();
-		let (candidate_backing_unbounded_tx, candidate_backing_unbounded_rx)
-			= metered::unbounded();
-		let (statement_distribution_unbounded_tx, statement_distribution_unbounded_rx)
-			= metered::unbounded();
-		let (availability_distribution_unbounded_tx, availability_distribution_unbounded_rx)
-			= metered::unbounded();
-		let (availability_recovery_unbounded_tx, availability_recovery_unbounded_rx)
-			= metered::unbounded();
-		let (bitfield_signing_unbounded_tx, bitfield_signing_unbounded_rx)
-			= metered::unbounded();
-		let (bitfield_distribution_unbounded_tx, bitfield_distribution_unbounded_rx)
-			= metered::unbounded();
-		let (provisioner_unbounded_tx, provisioner_unbounded_rx)
-			= metered::unbounded();
-		let (runtime_api_unbounded_tx, runtime_api_unbounded_rx)
-			= metered::unbounded();
-		let (availability_store_unbounded_tx, availability_store_unbounded_rx)
-			= metered::unbounded();
-		let (network_bridge_unbounded_tx, network_bridge_unbounded_rx)
-			= metered::unbounded();
-		let (chain_api_unbounded_tx, chain_api_unbounded_rx)
-			= metered::unbounded();
-		let (collator_protocol_unbounded_tx, collator_protocol_unbounded_rx)
-			= metered::unbounded();
-		let (collation_generation_unbounded_tx, collation_generation_unbounded_rx)
-			= metered::unbounded();
-		let (approval_distribution_unbounded_tx, approval_distribution_unbounded_rx)
-			= metered::unbounded();
-		let (approval_voting_unbounded_tx, approval_voting_unbounded_rx)
-			= metered::unbounded();
-		let (gossip_support_unbounded_tx, gossip_support_unbounded_rx)
-			= metered::unbounded();
-
-		let channels_out = ChannelsOut {
-			candidate_validation: candidate_validation_bounded_tx.clone(),
-			candidate_backing: candidate_backing_bounded_tx.clone(),
-			statement_distribution: statement_distribution_bounded_tx.clone(),
-			availability_distribution: availability_distribution_bounded_tx.clone(),
-			availability_recovery: availability_recovery_bounded_tx.clone(),
-			bitfield_signing: bitfield_signing_bounded_tx.clone(),
-			bitfield_distribution: bitfield_distribution_bounded_tx.clone(),
-			provisioner: provisioner_bounded_tx.clone(),
-			runtime_api: runtime_api_bounded_tx.clone(),
-			availability_store: availability_store_bounded_tx.clone(),
-			network_bridge: network_bridge_bounded_tx.clone(),
-			chain_api: chain_api_bounded_tx.clone(),
-			collator_protocol: collator_protocol_bounded_tx.clone(),
-			collation_generation: collation_generation_bounded_tx.clone(),
-			approval_distribution: approval_distribution_bounded_tx.clone(),
-			approval_voting: approval_voting_bounded_tx.clone(),
-			gossip_support: gossip_support_bounded_tx.clone(),
-
-			candidate_validation_unbounded: candidate_validation_unbounded_tx.clone(),
-			candidate_backing_unbounded: candidate_backing_unbounded_tx.clone(),
-			statement_distribution_unbounded: statement_distribution_unbounded_tx.clone(),
-			availability_distribution_unbounded: availability_distribution_unbounded_tx.clone(),
-			availability_recovery_unbounded: availability_recovery_unbounded_tx.clone(),
-			bitfield_signing_unbounded: bitfield_signing_unbounded_tx.clone(),
-			bitfield_distribution_unbounded: bitfield_distribution_unbounded_tx.clone(),
-			provisioner_unbounded: provisioner_unbounded_tx.clone(),
-			runtime_api_unbounded: runtime_api_unbounded_tx.clone(),
-			availability_store_unbounded: availability_store_unbounded_tx.clone(),
-			network_bridge_unbounded: network_bridge_unbounded_tx.clone(),
-			chain_api_unbounded: chain_api_unbounded_tx.clone(),
-			collator_protocol_unbounded: collator_protocol_unbounded_tx.clone(),
-			collation_generation_unbounded: collation_generation_unbounded_tx.clone(),
-			approval_distribution_unbounded: approval_distribution_unbounded_tx.clone(),
-			approval_voting_unbounded: approval_voting_unbounded_tx.clone(),
-			gossip_support_unbounded: gossip_support_unbounded_tx.clone(),
-		};
-
-		let candidate_validation_subsystem = spawn(
-			&mut s,
-			candidate_validation_bounded_tx,
-			stream::select(candidate_validation_bounded_rx, candidate_validation_unbounded_rx),
-			candidate_validation_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.candidate_validation,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let candidate_backing_subsystem = spawn(
-			&mut s,
-			candidate_backing_bounded_tx,
-			stream::select(candidate_backing_bounded_rx, candidate_backing_unbounded_rx),
-			candidate_backing_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.candidate_backing,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let statement_distribution_subsystem = spawn(
-			&mut s,
-			statement_distribution_bounded_tx,
-			stream::select(statement_distribution_bounded_rx, statement_distribution_unbounded_rx),
-			statement_distribution_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.statement_distribution,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let availability_distribution_subsystem = spawn(
-			&mut s,
-			availability_distribution_bounded_tx,
-			stream::select(availability_distribution_bounded_rx, availability_distribution_unbounded_rx),
-			availability_distribution_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.availability_distribution,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let availability_recovery_subsystem = spawn(
-			&mut s,
-			availability_recovery_bounded_tx,
-			stream::select(availability_recovery_bounded_rx, availability_recovery_unbounded_rx),
-			availability_recovery_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.availability_recovery,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let bitfield_signing_subsystem = spawn(
-			&mut s,
-			bitfield_signing_bounded_tx,
-			stream::select(bitfield_signing_bounded_rx, bitfield_signing_unbounded_rx),
-			bitfield_signing_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.bitfield_signing,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let bitfield_distribution_subsystem = spawn(
-			&mut s,
-			bitfield_distribution_bounded_tx,
-			stream::select(bitfield_distribution_bounded_rx, bitfield_distribution_unbounded_rx),
-			bitfield_distribution_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.bitfield_distribution,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let provisioner_subsystem = spawn(
-			&mut s,
-			provisioner_bounded_tx,
-			stream::select(provisioner_bounded_rx, provisioner_unbounded_rx),
-			provisioner_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.provisioner,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let runtime_api_subsystem = spawn(
-			&mut s,
-			runtime_api_bounded_tx,
-			stream::select(runtime_api_bounded_rx, runtime_api_unbounded_rx),
-			runtime_api_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.runtime_api,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let availability_store_subsystem = spawn(
-			&mut s,
-			availability_store_bounded_tx,
-			stream::select(availability_store_bounded_rx, availability_store_unbounded_rx),
-			availability_store_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.availability_store,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Blocking,
-		)?;
-
-		let network_bridge_subsystem = spawn(
-			&mut s,
-			network_bridge_bounded_tx,
-			stream::select(network_bridge_bounded_rx, network_bridge_unbounded_rx),
-			network_bridge_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.network_bridge,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let chain_api_subsystem = spawn(
-			&mut s,
-			chain_api_bounded_tx,
-			stream::select(chain_api_bounded_rx, chain_api_unbounded_rx),
-			chain_api_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.chain_api,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Blocking,
-		)?;
-
-		let collation_generation_subsystem = spawn(
-			&mut s,
-			collation_generation_bounded_tx,
-			stream::select(collation_generation_bounded_rx, collation_generation_unbounded_rx),
-			collation_generation_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.collation_generation,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let collator_protocol_subsystem = spawn(
-			&mut s,
-			collator_protocol_bounded_tx,
-			stream::select(collator_protocol_bounded_rx, collator_protocol_unbounded_rx),
-			collator_protocol_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.collator_protocol,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let approval_distribution_subsystem = spawn(
-			&mut s,
-			approval_distribution_bounded_tx,
-			stream::select(approval_distribution_bounded_rx, approval_distribution_unbounded_rx),
-			approval_distribution_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.approval_distribution,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let approval_voting_subsystem = spawn(
-			&mut s,
-			approval_voting_bounded_tx,
-			stream::select(approval_voting_bounded_rx, approval_voting_unbounded_rx),
-			approval_voting_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.approval_voting,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Blocking,
-		)?;
-
-		let gossip_support_subsystem = spawn(
-			&mut s,
-			gossip_support_bounded_tx,
-			stream::select(gossip_support_bounded_rx, gossip_support_unbounded_rx),
-			gossip_support_unbounded_tx.meter().clone(),
-			channels_out.clone(),
-			to_overseer_tx.clone(),
-			all_subsystems.gossip_support,
-			&metrics,
-			&mut running_subsystems,
-			TaskKind::Regular,
-		)?;
-
-		let leaves = leaves
-			.into_iter()
-			.map(|BlockInfo { hash, parent_hash: _, number }| (hash, number))
-			.collect();
-
-		let active_leaves = HashMap::new();
-		let activation_external_listeners = HashMap::new();
-
-		let subsystems = AllSubsystems {
-			candidate_validation: candidate_validation_subsystem,
-			candidate_backing: candidate_backing_subsystem,
-			statement_distribution: statement_distribution_subsystem,
-			availability_distribution: availability_distribution_subsystem,
-			availability_recovery: availability_recovery_subsystem,
-			bitfield_signing: bitfield_signing_subsystem,
-			bitfield_distribution: bitfield_distribution_subsystem,
-			provisioner: provisioner_subsystem,
-			runtime_api: runtime_api_subsystem,
-			availability_store: availability_store_subsystem,
-			network_bridge: network_bridge_subsystem,
-			chain_api: chain_api_subsystem,
-			collation_generation: collation_generation_subsystem,
-			collator_protocol: collator_protocol_subsystem,
-			approval_distribution: approval_distribution_subsystem,
-			approval_voting: approval_voting_subsystem,
-			gossip_support: gossip_support_subsystem,
-		};
-
+		let metrics: Metrics = <Metrics as MetricsTrait>::register(prometheus_registry)?;
+
+		let (mut overseer, handler) = Self::builder()
+			.candidate_validation(all_subsystems.candidate_validation)
+			.candidate_backing(all_subsystems.candidate_backing)
+			.statement_distribution(all_subsystems.statement_distribution)
+			.availability_distribution(all_subsystems.availability_distribution)
+			.availability_recovery(all_subsystems.availability_recovery)
+			.bitfield_signing(all_subsystems.bitfield_signing)
+			.bitfield_distribution(all_subsystems.bitfield_distribution)
+			.provisioner(all_subsystems.provisioner)
+			.runtime_api(all_subsystems.runtime_api)
+			.availability_store(all_subsystems.availability_store)
+			.network_bridge(all_subsystems.network_bridge)
+			.chain_api(all_subsystems.chain_api)
+			.collation_generation(all_subsystems.collation_generation)
+			.collator_protocol(all_subsystems.collator_protocol)
+			.approval_distribution(all_subsystems.approval_distribution)
+			.approval_voting(all_subsystems.approval_voting)
+			.gossip_support(all_subsystems.gossip_support)
+			.leaves(Vec::from_iter(
+				leaves.into_iter().map(|BlockInfo { hash, parent_hash: _, number }| (hash, number))
+			))
+			.known_leaves(LruCache::new(KNOWN_LEAVES_CACHE_SIZE))
+			.active_leaves(Default::default())
+			.span_per_active_leaf(Default::default())
+			.activation_external_listeners(Default::default())
+			.supports_parachains(supports_parachains)
+			.metrics(metrics.clone())
+			.spawner(s)
+			.build()?;
+
+		// spawn the metrics metronome task
 		{
 			struct ExtractNameAndMeters;
+
 			impl<'a, T: 'a> MapSubsystem<&'a OverseenSubsystem<T>> for ExtractNameAndMeters {
-				type Output = (&'static str, SubsystemMeters);
+				type Output = Option<(&'static str, SubsystemMeters)>;
 
 				fn map_subsystem(&self, subsystem: &'a OverseenSubsystem<T>) -> Self::Output {
-					let instance = subsystem.instance.as_ref()
-						.expect("Extraction is done directly after spawning when subsystems\
-						have not concluded; qed");
-
-					(
-						instance.name,
-						instance.meters.clone(),
-					)
+					subsystem.instance.as_ref().map(|instance| {
+						(
+							instance.name,
+							instance.meters.clone(),
+						)
+					})
 				}
 			}
+			let subsystem_meters = overseer.map_subsystems(ExtractNameAndMeters);
 
-			let subsystem_meters = subsystems.as_ref().map_subsystems(ExtractNameAndMeters);
 			let metronome_metrics = metrics.clone();
 			let metronome = Metronome::new(std::time::Duration::from_millis(950))
 				.for_each(move |_| {
-					let subsystem_meters = subsystem_meters.as_ref()
-						.map_subsystems(|&(name, ref meters): &(_, SubsystemMeters)| (name, meters.read()));
 
 					// We combine the amount of messages from subsystems to the overseer
 					// as well as the amount of messages from external sources to the overseer
-					// into one to_overseer value.
-					metronome_metrics.channel_fill_level_snapshot(subsystem_meters);
+					// into one `to_overseer` value.
+					metronome_metrics.channel_fill_level_snapshot(
+						subsystem_meters.iter()
+							.cloned()
+							.filter_map(|x| x)
+							.map(|(name, ref meters)| (name, meters.read()))
+					);
 
 					async move {
 						()
 					}
 				});
-			s.spawn("metrics_metronome", Box::pin(metronome));
+			overseer.spawner().spawn("metrics_metronome", Box::pin(metronome));
 		}
 
-		let this = Self {
-			subsystems,
-			s,
-			running_subsystems,
-			to_overseer_rx: to_overseer_rx.fuse(),
-			events_rx,
-			activation_external_listeners,
-			leaves,
-			active_leaves,
-			metrics,
-			span_per_active_leaf: Default::default(),
-			known_leaves: LruCache::new(KNOWN_LEAVES_CACHE_SIZE),
-			supports_parachains,
-		};
-
-		Ok((this, handler))
+		Ok((overseer, Handle(handler)))
 	}
 
-	// Stop the overseer.
+	/// Stop the overseer.
 	async fn stop(mut self) {
-		let _ = self.subsystems.candidate_validation.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.candidate_backing.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.statement_distribution.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.availability_distribution.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.availability_recovery.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.bitfield_signing.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.bitfield_distribution.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.provisioner.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.runtime_api.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.availability_store.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.network_bridge.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.chain_api.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.collator_protocol.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.collation_generation.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.approval_distribution.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.approval_voting.send_signal(OverseerSignal::Conclude).await;
-		let _ = self.subsystems.gossip_support.send_signal(OverseerSignal::Conclude).await;
-
-		let mut stop_delay = Delay::new(Duration::from_secs(STOP_DELAY)).fuse();
-
-		loop {
-			select! {
-				_ = self.running_subsystems.next() => {
-					if self.running_subsystems.is_empty() {
-						break;
-					}
-				},
-				_ = stop_delay => break,
-				complete => break,
-			}
-		}
+		let _ = self.wait_terminate(
+				OverseerSignal::Conclude,
+				::std::time::Duration::from_secs(1_u64)
+			).await;
 	}
 
 	/// Run the `Overseer`.
@@ -1933,6 +680,7 @@ where
 					match msg {
 						Event::MsgToSubsystem { msg, origin } => {
 							self.route_message(msg.into(), origin).await?;
+							self.metrics.on_message_relayed();
 						}
 						Event::Stop => {
 							self.stop().await;
@@ -2000,10 +748,9 @@ where
 		self.clean_up_external_listeners();
 
 		if !update.is_empty() {
-			self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await
-		} else {
-			Ok(())
+			self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?;
 		}
+		Ok(())
 	}
 
 	async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> {
@@ -2034,94 +781,6 @@ where
 		Ok(())
 	}
 
-	async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> {
-		self.subsystems.candidate_validation.send_signal(signal.clone()).await?;
-		self.subsystems.candidate_backing.send_signal(signal.clone()).await?;
-		self.subsystems.statement_distribution.send_signal(signal.clone()).await?;
-		self.subsystems.availability_distribution.send_signal(signal.clone()).await?;
-		self.subsystems.availability_recovery.send_signal(signal.clone()).await?;
-		self.subsystems.bitfield_signing.send_signal(signal.clone()).await?;
-		self.subsystems.bitfield_distribution.send_signal(signal.clone()).await?;
-		self.subsystems.provisioner.send_signal(signal.clone()).await?;
-		self.subsystems.runtime_api.send_signal(signal.clone()).await?;
-		self.subsystems.availability_store.send_signal(signal.clone()).await?;
-		self.subsystems.network_bridge.send_signal(signal.clone()).await?;
-		self.subsystems.chain_api.send_signal(signal.clone()).await?;
-		self.subsystems.collator_protocol.send_signal(signal.clone()).await?;
-		self.subsystems.collation_generation.send_signal(signal.clone()).await?;
-		self.subsystems.approval_distribution.send_signal(signal.clone()).await?;
-		self.subsystems.approval_voting.send_signal(signal.clone()).await?;
-		self.subsystems.gossip_support.send_signal(signal).await?;
-
-		Ok(())
-	}
-
-	async fn route_message(
-		&mut self,
-		msg: AllMessages,
-		origin: &'static str,
-	) -> SubsystemResult<()> {
-		self.metrics.on_message_relayed();
-		match msg {
-			AllMessages::CandidateValidation(msg) => {
-				self.subsystems.candidate_validation.send_message(msg, origin).await?;
-			},
-			AllMessages::CandidateBacking(msg) => {
-				self.subsystems.candidate_backing.send_message(msg, origin).await?;
-			},
-			AllMessages::StatementDistribution(msg) => {
-				self.subsystems.statement_distribution.send_message(msg, origin).await?;
-			},
-			AllMessages::AvailabilityDistribution(msg) => {
-				self.subsystems.availability_distribution.send_message(msg, origin).await?;
-			},
-			AllMessages::AvailabilityRecovery(msg) => {
-				self.subsystems.availability_recovery.send_message(msg, origin).await?;
-			},
-			AllMessages::BitfieldDistribution(msg) => {
-				self.subsystems.bitfield_distribution.send_message(msg, origin).await?;
-			},
-			AllMessages::BitfieldSigning(msg) => {
-				self.subsystems.bitfield_signing.send_message(msg, origin).await?;
-			},
-			AllMessages::Provisioner(msg) => {
-				self.subsystems.provisioner.send_message(msg, origin).await?;
-			},
-			AllMessages::RuntimeApi(msg) => {
-				self.subsystems.runtime_api.send_message(msg, origin).await?;
-			},
-			AllMessages::AvailabilityStore(msg) => {
-				self.subsystems.availability_store.send_message(msg, origin).await?;
-			},
-			AllMessages::NetworkBridge(msg) => {
-				self.subsystems.network_bridge.send_message(msg, origin).await?;
-			},
-			AllMessages::ChainApi(msg) => {
-				self.subsystems.chain_api.send_message(msg, origin).await?;
-			},
-			AllMessages::CollationGeneration(msg) => {
-				self.subsystems.collation_generation.send_message(msg, origin).await?;
-			},
-			AllMessages::CollatorProtocol(msg) => {
-				self.subsystems.collator_protocol.send_message(msg, origin).await?;
-			},
-			AllMessages::ApprovalDistribution(msg) => {
-				self.subsystems.approval_distribution.send_message(msg, origin).await?;
-			},
-			AllMessages::ApprovalVoting(msg) => {
-				self.subsystems.approval_voting.send_message(msg, origin).await?;
-			},
-			AllMessages::GossipSupport(msg) => {
-				self.subsystems.gossip_support.send_message(msg, origin).await?;
-			},
-			AllMessages::DisputeCoordinator(_) => {}
-			AllMessages::DisputeParticipation(_) => {}
-			AllMessages::ChainSelection(_) => {}
-		}
-
-		Ok(())
-	}
-
 	/// Handles a header activation. If the header's state doesn't support the parachains API,
 	/// this returns `None`.
 	fn on_head_activated(&mut self, hash: &Hash, parent_hash: Option<Hash>)
@@ -2185,72 +844,42 @@ where
 	}
 
 	fn spawn_job(&mut self, name: &'static str, j: BoxFuture<'static, ()>) {
-		self.s.spawn(name, j);
+		self.spawner.spawn(name, j);
 	}
 
 	fn spawn_blocking_job(&mut self, name: &'static str, j: BoxFuture<'static, ()>) {
-		self.s.spawn_blocking(name, j);
+		self.spawner.spawn_blocking(name, j);
 	}
 }
 
-enum TaskKind {
-	Regular,
-	Blocking,
-}
 
-fn spawn<S: SpawnNamed, M: Send + 'static>(
-	spawner: &mut S,
-	message_tx: metered::MeteredSender<MessagePacket<M>>,
-	message_rx: SubsystemIncomingMessages<M>,
-	unbounded_meter: metered::Meter,
-	to_subsystems: ChannelsOut,
-	to_overseer_tx: metered::UnboundedMeteredSender<ToOverseer>,
-	s: impl Subsystem<OverseerSubsystemContext<M>>,
-	metrics: &Metrics,
-	futures: &mut FuturesUnordered<BoxFuture<'static, SubsystemResult<()>>>,
-	task_kind: TaskKind,
-) -> SubsystemResult<OverseenSubsystem<M>> {
-	let (signal_tx, signal_rx) = metered::channel(SIGNAL_CHANNEL_CAPACITY);
-	let ctx = OverseerSubsystemContext::new(
-		signal_rx,
-		message_rx,
-		to_subsystems,
-		to_overseer_tx,
-		metrics.clone(),
-	);
-	let SpawnedSubsystem { future, name } = s.start(ctx);
-
-	let (tx, rx) = oneshot::channel();
-
-	let fut = Box::pin(async move {
-		if let Err(e) = future.await {
-			tracing::error!(subsystem=name, err = ?e, "subsystem exited with error");
-		} else {
-			tracing::debug!(subsystem=name, "subsystem exited without an error");
-		}
-		let _ = tx.send(());
-	});
 
-	match task_kind {
-		TaskKind::Regular => spawner.spawn(name, fut),
-		TaskKind::Blocking => spawner.spawn_blocking(name, fut),
-	}
 
-	futures.push(Box::pin(rx.map(|e| { tracing::warn!(err = ?e, "dropping error"); Ok(()) })));
-
-	let instance = Some(SubsystemInstance {
-		meters: SubsystemMeters {
-			unbounded: unbounded_meter,
-			bounded: message_tx.meter().clone(),
-			signals: signal_tx.meter().clone(),
-		},
-		tx_signal: signal_tx,
-		tx_bounded: message_tx,
-		signals_received: 0,
-		name,
-	});
-
-	Ok(OverseenSubsystem {
-		instance,
-	})
+// Additional `From` implementations for dealing with incoming network messages.
+// These are kept out of the proc macro for the sake of simplicity, to avoid
+// teaching the proc macro logic about even more types.
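+//
+// For example, an `IncomingRequest<req_res_v1::ChunkFetchingRequest>` first converts
+// into an `AvailabilityDistributionMessage` and from there into `AllMessages`, which
+// is the type the overseer routes on.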
+
+use polkadot_node_network_protocol::{
+	request_response::{request::IncomingRequest, v1 as req_res_v1},
+};
+
+impl From<IncomingRequest<req_res_v1::PoVFetchingRequest>> for AllMessages {
+	fn from(req: IncomingRequest<req_res_v1::PoVFetchingRequest>) -> Self {
+		From::<AvailabilityDistributionMessage>::from(From::from(req))
+	}
+}
+impl From<IncomingRequest<req_res_v1::ChunkFetchingRequest>> for AllMessages {
+	fn from(req: IncomingRequest<req_res_v1::ChunkFetchingRequest>) -> Self {
+		From::<AvailabilityDistributionMessage>::from(From::from(req))
+	}
+}
+impl From<IncomingRequest<req_res_v1::CollationFetchingRequest>> for AllMessages {
+	fn from(req: IncomingRequest<req_res_v1::CollationFetchingRequest>) -> Self {
+		From::<CollatorProtocolMessage>::from(From::from(req))
+	}
+}
+impl From<IncomingRequest<req_res_v1::AvailableDataFetchingRequest>> for AllMessages {
+	fn from(req: IncomingRequest<req_res_v1::AvailableDataFetchingRequest>) -> Self {
+		From::<AvailabilityRecoveryMessage>::from(From::from(req))
+	}
 }
diff --git a/polkadot/node/overseer/src/metrics.rs b/polkadot/node/overseer/src/metrics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3563f23fbd4b5f52c8044e14cfd3d7e2af8e4a8e
--- /dev/null
+++ b/polkadot/node/overseer/src/metrics.rs
@@ -0,0 +1,195 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Prometheus metrics related to the overseer and its channels.
+
+use super::*;
+use polkadot_node_metrics::metrics::{self, prometheus};
+
+/// Overseer Prometheus metrics.
+#[derive(Clone)]
+struct MetricsInner {
+	activated_heads_total: prometheus::Counter<prometheus::U64>,
+	deactivated_heads_total: prometheus::Counter<prometheus::U64>,
+	messages_relayed_total: prometheus::Counter<prometheus::U64>,
+	to_subsystem_bounded_sent: prometheus::GaugeVec<prometheus::U64>,
+	to_subsystem_bounded_received: prometheus::GaugeVec<prometheus::U64>,
+	to_subsystem_unbounded_sent: prometheus::GaugeVec<prometheus::U64>,
+	to_subsystem_unbounded_received: prometheus::GaugeVec<prometheus::U64>,
+	signals_sent: prometheus::GaugeVec<prometheus::U64>,
+	signals_received: prometheus::GaugeVec<prometheus::U64>,
+}
+
+
+/// A shareable metrics type for use with the overseer.
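+///
+/// This wraps an `Option<MetricsInner>`: when constructed via `Default` (i.e. without
+/// a Prometheus registry) every recording method is a no-op.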
+#[derive(Default, Clone)]
+pub struct Metrics(Option<MetricsInner>);
+
+impl Metrics {
+	pub(crate) fn on_head_activated(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.activated_heads_total.inc();
+		}
+	}
+
+	pub(crate) fn on_head_deactivated(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.deactivated_heads_total.inc();
+		}
+	}
+
+	pub(crate) fn on_message_relayed(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.messages_relayed_total.inc();
+		}
+	}
+
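+	/// Record a snapshot of the per-subsystem channel fill levels in the
+	/// corresponding Prometheus gauges. Intended to be called periodically,
+	/// e.g. from the metronome task spawned by the overseer.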
+	pub(crate) fn channel_fill_level_snapshot(
+		&self,
+		collection: impl IntoIterator<Item=(&'static str, SubsystemMeterReadouts)>,
+	) {
+		if let Some(metrics) = &self.0 {
+			collection.into_iter().for_each(
+					|(name, readouts): (_, SubsystemMeterReadouts)| {
+						metrics.to_subsystem_bounded_sent.with_label_values(&[name])
+							.set(readouts.bounded.sent as u64);
+
+						metrics.to_subsystem_bounded_received.with_label_values(&[name])
+							.set(readouts.bounded.received as u64);
+
+						metrics.to_subsystem_unbounded_sent.with_label_values(&[name])
+							.set(readouts.unbounded.sent as u64);
+
+						metrics.to_subsystem_unbounded_received.with_label_values(&[name])
+							.set(readouts.unbounded.received as u64);
+
+						metrics.signals_sent.with_label_values(&[name])
+							.set(readouts.signals.sent as u64);
+
+						metrics.signals_received.with_label_values(&[name])
+							.set(readouts.signals.received as u64);
+					}
+			);
+		}
+	}
+}
+
+impl metrics::Metrics for Metrics {
+	fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
+		let metrics = MetricsInner {
+			activated_heads_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_activated_heads_total",
+					"Number of activated heads."
+				)?,
+				registry,
+			)?,
+			deactivated_heads_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_deactivated_heads_total",
+					"Number of deactivated heads."
+				)?,
+				registry,
+			)?,
+			messages_relayed_total: prometheus::register(
+				prometheus::Counter::new(
+					"parachain_messages_relayed_total",
+					"Number of messages relayed by Overseer."
+				)?,
+				registry,
+			)?,
+			to_subsystem_bounded_sent: prometheus::register(
+				prometheus::GaugeVec::<prometheus::U64>::new(
+					prometheus::Opts::new(
+						"parachain_subsystem_bounded_sent",
+						"Number of elements sent to subsystems' bounded queues",
+					),
+					&[
+						"subsystem_name",
+					],
+				)?,
+				registry,
+			)?,
+			to_subsystem_bounded_received: prometheus::register(
+				prometheus::GaugeVec::<prometheus::U64>::new(
+					prometheus::Opts::new(
+						"parachain_subsystem_bounded_received",
+						"Number of elements received by subsystems' bounded queues",
+					),
+					&[
+						"subsystem_name",
+					],
+				)?,
+				registry,
+			)?,
+			to_subsystem_unbounded_sent: prometheus::register(
+				prometheus::GaugeVec::<prometheus::U64>::new(
+					prometheus::Opts::new(
+						"parachain_subsystem_unbounded_sent",
+						"Number of elements sent to subsystems' unbounded queues",
+					),
+					&[
+						"subsystem_name",
+					],
+				)?,
+				registry,
+			)?,
+			to_subsystem_unbounded_received: prometheus::register(
+				prometheus::GaugeVec::<prometheus::U64>::new(
+					prometheus::Opts::new(
+						"parachain_subsystem_unbounded_received",
+						"Number of elements received by subsystems' unbounded queues",
+					),
+					&[
+						"subsystem_name",
+					],
+				)?,
+				registry,
+			)?,
+			signals_sent: prometheus::register(
+				prometheus::GaugeVec::<prometheus::U64>::new(
+					prometheus::Opts::new(
+						"parachain_overseer_signals_sent",
+						"Number of signals sent by overseer to subsystems",
+					),
+					&[
+						"subsystem_name",
+					],
+				)?,
+				registry,
+			)?,
+			signals_received: prometheus::register(
+				prometheus::GaugeVec::<prometheus::U64>::new(
+					prometheus::Opts::new(
+						"parachain_overseer_signals_received",
+						"Number of signals received by subsystems from overseer",
+					),
+					&[
+						"subsystem_name",
+					],
+				)?,
+				registry,
+			)?,
+		};
+		Ok(Metrics(Some(metrics)))
+	}
+}
+
+impl fmt::Debug for Metrics {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		f.write_str("Metrics {...}")
+	}
+}
diff --git a/polkadot/node/overseer/src/subsystems.rs b/polkadot/node/overseer/src/subsystems.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b75e7e50e6b87cff1e36b5a8724d0c126b1dad42
--- /dev/null
+++ b/polkadot/node/overseer/src/subsystems.rs
@@ -0,0 +1,256 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Legacy way of defining subsystems.
+//!
+//! In the future, everything should be set up using the generated
+//! overseer builder pattern instead.
+
+use polkadot_node_subsystem_types::errors::SubsystemError;
+use polkadot_overseer_gen::{
+	MapSubsystem, SubsystemContext,
+	Subsystem,
+	SpawnedSubsystem,
+	FromOverseer,
+};
+use polkadot_overseer_all_subsystems_gen::AllSubsystemsGen;
+use crate::OverseerSignal;
+use crate::AllMessages;
+
+/// A dummy subsystem that implements [`Subsystem`] for all
+/// types of messages. Used for tests or as a placeholder.
+#[derive(Clone, Copy, Debug)]
+pub struct DummySubsystem;
+
+impl<Context> Subsystem<Context, SubsystemError> for DummySubsystem
+where
+	Context: SubsystemContext<Signal=OverseerSignal, Error=SubsystemError, AllMessages=AllMessages>,
+{
+	fn start(self, mut ctx: Context) -> SpawnedSubsystem<SubsystemError> {
+		let future = Box::pin(async move {
+			loop {
+				match ctx.recv().await {
+					Err(_) => return Ok(()),
+					Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return Ok(()),
+					Ok(overseer_msg) => {
+						tracing::debug!(
+							target: "dummy-subsystem",
+							"Discarding a message sent from overseer {:?}",
+							overseer_msg
+						);
+						continue;
+					}
+				}
+			}
+		});
+
+		SpawnedSubsystem {
+			name: "dummy-subsystem",
+			future,
+		}
+	}
+}
+
+
+/// This struct is passed as an argument to create a new instance of an [`Overseer`].
+///
+/// Since any entity that satisfies the interface may act as a [`Subsystem`], this
+/// allows mocking in test code.
+///
+/// Each [`Subsystem`] is supposed to implement some interface that is generic over
+/// the message type specific to that [`Subsystem`]. At the moment not all subsystems
+/// are implemented, and the remaining ones can be mocked with [`DummySubsystem`].
+#[derive(Debug, Clone, AllSubsystemsGen)]
+pub struct AllSubsystems<
+	CV = (), CB = (), SD = (), AD = (), AR = (), BS = (), BD = (), P = (),
+	RA = (), AS = (), NB = (), CA = (), CG = (), CP = (), ApD = (), ApV = (),
+	GS = (),
+> {
+	/// A candidate validation subsystem.
+	pub candidate_validation: CV,
+	/// A candidate backing subsystem.
+	pub candidate_backing: CB,
+	/// A statement distribution subsystem.
+	pub statement_distribution: SD,
+	/// An availability distribution subsystem.
+	pub availability_distribution: AD,
+	/// An availability recovery subsystem.
+	pub availability_recovery: AR,
+	/// A bitfield signing subsystem.
+	pub bitfield_signing: BS,
+	/// A bitfield distribution subsystem.
+	pub bitfield_distribution: BD,
+	/// A provisioner subsystem.
+	pub provisioner: P,
+	/// A runtime API subsystem.
+	pub runtime_api: RA,
+	/// An availability store subsystem.
+	pub availability_store: AS,
+	/// A network bridge subsystem.
+	pub network_bridge: NB,
+	/// A Chain API subsystem.
+	pub chain_api: CA,
+	/// A Collation Generation subsystem.
+	pub collation_generation: CG,
+	/// A Collator Protocol subsystem.
+	pub collator_protocol: CP,
+	/// An Approval Distribution subsystem.
+	pub approval_distribution: ApD,
+	/// An Approval Voting subsystem.
+	pub approval_voting: ApV,
+	/// A Connection Request Issuer subsystem.
+	pub gossip_support: GS,
+}
+
+impl<CV, CB, SD, AD, AR, BS, BD, P, RA, AS, NB, CA, CG, CP, ApD, ApV, GS>
+	AllSubsystems<CV, CB, SD, AD, AR, BS, BD, P, RA, AS, NB, CA, CG, CP, ApD, ApV, GS>
+{
+	/// Create a new instance of [`AllSubsystems`].
+	///
+	/// Each subsystem is set to [`DummySubsystem`].
+	///
+	/// # Note
+	///
+	/// Because of a bug in rustc, it is required that when calling this function
+	/// you explicitly provide a type (any type will do) for the first generic parameter:
+	///
+	/// ```
+	/// polkadot_overseer::AllSubsystems::<()>::dummy();
+	/// ```
+	pub fn dummy() -> AllSubsystems<
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+		DummySubsystem,
+	> {
+		AllSubsystems {
+			candidate_validation: DummySubsystem,
+			candidate_backing: DummySubsystem,
+			statement_distribution: DummySubsystem,
+			availability_distribution: DummySubsystem,
+			availability_recovery: DummySubsystem,
+			bitfield_signing: DummySubsystem,
+			bitfield_distribution: DummySubsystem,
+			provisioner: DummySubsystem,
+			runtime_api: DummySubsystem,
+			availability_store: DummySubsystem,
+			network_bridge: DummySubsystem,
+			chain_api: DummySubsystem,
+			collation_generation: DummySubsystem,
+			collator_protocol: DummySubsystem,
+			approval_distribution: DummySubsystem,
+			approval_voting: DummySubsystem,
+			gossip_support: DummySubsystem,
+		}
+	}
+
+	/// Reference every individual subsystem.
+	pub fn as_ref(&self) -> AllSubsystems<&'_ CV, &'_ CB, &'_ SD, &'_ AD, &'_ AR, &'_ BS, &'_ BD, &'_ P, &'_ RA, &'_ AS, &'_ NB, &'_ CA, &'_ CG, &'_ CP, &'_ ApD, &'_ ApV, &'_ GS> {
+		AllSubsystems {
+			candidate_validation: &self.candidate_validation,
+			candidate_backing: &self.candidate_backing,
+			statement_distribution: &self.statement_distribution,
+			availability_distribution: &self.availability_distribution,
+			availability_recovery: &self.availability_recovery,
+			bitfield_signing: &self.bitfield_signing,
+			bitfield_distribution: &self.bitfield_distribution,
+			provisioner: &self.provisioner,
+			runtime_api: &self.runtime_api,
+			availability_store: &self.availability_store,
+			network_bridge: &self.network_bridge,
+			chain_api: &self.chain_api,
+			collation_generation: &self.collation_generation,
+			collator_protocol: &self.collator_protocol,
+			approval_distribution: &self.approval_distribution,
+			approval_voting: &self.approval_voting,
+			gossip_support: &self.gossip_support,
+		}
+	}
+
+	/// Map each subsystem with the given `Mapper`, producing a new `AllSubsystems`
+	/// made of the mapper's outputs.
+	pub fn map_subsystems<Mapper>(self, mapper: Mapper)
+		-> AllSubsystems<
+			<Mapper as MapSubsystem<CV>>::Output,
+			<Mapper as MapSubsystem<CB>>::Output,
+			<Mapper as MapSubsystem<SD>>::Output,
+			<Mapper as MapSubsystem<AD>>::Output,
+			<Mapper as MapSubsystem<AR>>::Output,
+			<Mapper as MapSubsystem<BS>>::Output,
+			<Mapper as MapSubsystem<BD>>::Output,
+			<Mapper as MapSubsystem<P>>::Output,
+			<Mapper as MapSubsystem<RA>>::Output,
+			<Mapper as MapSubsystem<AS>>::Output,
+			<Mapper as MapSubsystem<NB>>::Output,
+			<Mapper as MapSubsystem<CA>>::Output,
+			<Mapper as MapSubsystem<CG>>::Output,
+			<Mapper as MapSubsystem<CP>>::Output,
+			<Mapper as MapSubsystem<ApD>>::Output,
+			<Mapper as MapSubsystem<ApV>>::Output,
+			<Mapper as MapSubsystem<GS>>::Output,
+		>
+	where
+		Mapper: MapSubsystem<CV>,
+		Mapper: MapSubsystem<CB>,
+		Mapper: MapSubsystem<SD>,
+		Mapper: MapSubsystem<AD>,
+		Mapper: MapSubsystem<AR>,
+		Mapper: MapSubsystem<BS>,
+		Mapper: MapSubsystem<BD>,
+		Mapper: MapSubsystem<P>,
+		Mapper: MapSubsystem<RA>,
+		Mapper: MapSubsystem<AS>,
+		Mapper: MapSubsystem<NB>,
+		Mapper: MapSubsystem<CA>,
+		Mapper: MapSubsystem<CG>,
+		Mapper: MapSubsystem<CP>,
+		Mapper: MapSubsystem<ApD>,
+		Mapper: MapSubsystem<ApV>,
+		Mapper: MapSubsystem<GS>,
+	{
+		AllSubsystems {
+			candidate_validation: <Mapper as MapSubsystem<CV>>::map_subsystem(&mapper, self.candidate_validation),
+			candidate_backing: <Mapper as MapSubsystem<CB>>::map_subsystem(&mapper, self.candidate_backing),
+			statement_distribution: <Mapper as MapSubsystem<SD>>::map_subsystem(&mapper, self.statement_distribution),
+			availability_distribution: <Mapper as MapSubsystem<AD>>::map_subsystem(&mapper, self.availability_distribution),
+			availability_recovery: <Mapper as MapSubsystem<AR>>::map_subsystem(&mapper, self.availability_recovery),
+			bitfield_signing: <Mapper as MapSubsystem<BS>>::map_subsystem(&mapper, self.bitfield_signing),
+			bitfield_distribution: <Mapper as MapSubsystem<BD>>::map_subsystem(&mapper, self.bitfield_distribution),
+			provisioner: <Mapper as MapSubsystem<P>>::map_subsystem(&mapper, self.provisioner),
+			runtime_api: <Mapper as MapSubsystem<RA>>::map_subsystem(&mapper, self.runtime_api),
+			availability_store: <Mapper as MapSubsystem<AS>>::map_subsystem(&mapper, self.availability_store),
+			network_bridge: <Mapper as MapSubsystem<NB>>::map_subsystem(&mapper, self.network_bridge),
+			chain_api: <Mapper as MapSubsystem<CA>>::map_subsystem(&mapper, self.chain_api),
+			collation_generation: <Mapper as MapSubsystem<CG>>::map_subsystem(&mapper, self.collation_generation),
+			collator_protocol: <Mapper as MapSubsystem<CP>>::map_subsystem(&mapper, self.collator_protocol),
+			approval_distribution: <Mapper as MapSubsystem<ApD>>::map_subsystem(&mapper, self.approval_distribution),
+			approval_voting: <Mapper as MapSubsystem<ApV>>::map_subsystem(&mapper, self.approval_voting),
+			gossip_support: <Mapper as MapSubsystem<GS>>::map_subsystem(&mapper, self.gossip_support),
+		}
+	}
+}
diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs
index 888ebea0e1275a2402a9688ef8fe4fede338a67b..a4dd0359c70cb7ec7e42a37ba3363bbc5700da1e 100644
--- a/polkadot/node/overseer/src/tests.rs
+++ b/polkadot/node/overseer/src/tests.rs
@@ -16,23 +16,44 @@
 
 use std::sync::atomic;
 use std::collections::HashMap;
-use futures::{executor, pin_mut, select, FutureExt, pending};
+use std::task::{Poll};
+use futures::{executor, pin_mut, select, FutureExt, pending, poll, stream};
 
 use polkadot_primitives::v1::{CollatorPair, CandidateHash};
-use polkadot_subsystem::{messages::RuntimeApiRequest, messages::NetworkBridgeEvent, jaeger};
 use polkadot_node_primitives::{CollationResult, CollationGenerationConfig, PoV, BlockData};
 use polkadot_node_network_protocol::{PeerId, UnifiedReputationChange};
-use polkadot_node_subsystem_util::metered;
+use polkadot_node_subsystem_types::{
+	ActivatedLeaf, LeafStatus,
+	messages::{
+		RuntimeApiRequest,
+		NetworkBridgeEvent,
+	},
+	jaeger,
+};
+
+use crate::{
+	self as overseer,
+	Overseer,
+	HeadSupportsParachains,
+	gen::Delay,
+
+};
+use metered_channel as metered;
 
 use sp_core::crypto::Pair as _;
 use assert_matches::assert_matches;
 
 use super::*;
 
+
+type SpawnedSubsystem = crate::gen::SpawnedSubsystem<SubsystemError>;
+
 struct TestSubsystem1(metered::MeteredSender<usize>);
 
-impl<C> Subsystem<C> for TestSubsystem1
-	where C: SubsystemContext<Message=CandidateValidationMessage>
+impl<C> overseer::Subsystem<C, SubsystemError> for TestSubsystem1
+where
+	C: overseer::SubsystemContext<Message=CandidateValidationMessage,Signal=OverseerSignal,AllMessages=AllMessages>,
+
 {
 	fn start(self, mut ctx: C) -> SpawnedSubsystem {
 		let mut sender = self.0;
@@ -59,8 +80,9 @@ impl<C> Subsystem<C> for TestSubsystem1
 
 struct TestSubsystem2(metered::MeteredSender<usize>);
 
-impl<C> Subsystem<C> for TestSubsystem2
-	where C: SubsystemContext<Message=CandidateBackingMessage>
+impl<C> overseer::Subsystem<C, SubsystemError> for TestSubsystem2
+where
+	C: overseer::SubsystemContext<Message=CandidateBackingMessage,Signal=OverseerSignal,AllMessages=AllMessages>,
 {
 	fn start(self, mut ctx: C) -> SpawnedSubsystem {
 		let sender = self.0.clone();
@@ -73,14 +95,12 @@ impl<C> Subsystem<C> for TestSubsystem2
 					if c < 10 {
 						let (tx, _) = oneshot::channel();
 						ctx.send_message(
-							AllMessages::CandidateValidation(
-								CandidateValidationMessage::ValidateFromChainState(
-									Default::default(),
-									PoV {
-										block_data: BlockData(Vec::new()),
-									}.into(),
-									tx,
-								)
+							CandidateValidationMessage::ValidateFromChainState(
+								Default::default(),
+								PoV {
+									block_data: BlockData(Vec::new()),
+								}.into(),
+								tx,
 							)
 						).await;
 						c += 1;
@@ -107,8 +127,9 @@ impl<C> Subsystem<C> for TestSubsystem2
 
 struct ReturnOnStart;
 
-impl<C> Subsystem<C> for ReturnOnStart
-	where C: SubsystemContext<Message=CandidateBackingMessage>
+impl<C> overseer::Subsystem<C, SubsystemError> for ReturnOnStart
+where
+	C: overseer::SubsystemContext<Message=CandidateBackingMessage,Signal=OverseerSignal,AllMessages=AllMessages>,
 {
 	fn start(self, mut _ctx: C) -> SpawnedSubsystem {
 		SpawnedSubsystem {
@@ -283,8 +304,9 @@ fn overseer_ends_on_subsystem_exit() {
 
 struct TestSubsystem5(metered::MeteredSender<OverseerSignal>);
 
-impl<C> Subsystem<C> for TestSubsystem5
-	where C: SubsystemContext<Message=CandidateValidationMessage>
+impl<C> overseer::Subsystem<C, SubsystemError> for TestSubsystem5
+where
+	C: overseer::SubsystemContext<Message=CandidateValidationMessage,Signal=OverseerSignal,AllMessages=AllMessages>,
 {
 	fn start(self, mut ctx: C) -> SpawnedSubsystem {
 		let mut sender = self.0.clone();
@@ -314,8 +336,9 @@ impl<C> Subsystem<C> for TestSubsystem5
 
 struct TestSubsystem6(metered::MeteredSender<OverseerSignal>);
 
-impl<C> Subsystem<C> for TestSubsystem6
-	where C: SubsystemContext<Message=CandidateBackingMessage>
+impl<C> Subsystem<C, SubsystemError> for TestSubsystem6
+where
+	C: overseer::SubsystemContext<Message=CandidateBackingMessage,Signal=OverseerSignal,AllMessages=AllMessages>,
 {
 	fn start(self, mut ctx: C) -> SpawnedSubsystem {
 		let mut sender = self.0.clone();
@@ -396,15 +419,15 @@ fn overseer_start_stop_works() {
 			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf {
 				hash: first_block_hash,
 				number: 1,
-				status: LeafStatus::Fresh,
 				span: Arc::new(jaeger::Span::Disabled),
+				status: LeafStatus::Fresh,
 			})),
 			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
 				activated: [ActivatedLeaf {
 					hash: second_block_hash,
 					number: 2,
-					status: LeafStatus::Fresh,
 					span: Arc::new(jaeger::Span::Disabled),
+					status: LeafStatus::Fresh,
 				}].as_ref().into(),
 				deactivated: [first_block_hash].as_ref().into(),
 			}),
@@ -412,8 +435,8 @@ fn overseer_start_stop_works() {
 				activated: [ActivatedLeaf {
 					hash: third_block_hash,
 					number: 3,
-					status: LeafStatus::Fresh,
 					span: Arc::new(jaeger::Span::Disabled),
+					status: LeafStatus::Fresh,
 				}].as_ref().into(),
 				deactivated: [second_block_hash].as_ref().into(),
 			}),
@@ -507,14 +530,14 @@ fn overseer_finalize_works() {
 					ActivatedLeaf {
 						hash: first_block_hash,
 						number: 1,
-						status: LeafStatus::Fresh,
 						span: Arc::new(jaeger::Span::Disabled),
+						status: LeafStatus::Fresh,
 					},
 					ActivatedLeaf {
 						hash: second_block_hash,
 						number: 2,
-						status: LeafStatus::Fresh,
 						span: Arc::new(jaeger::Span::Disabled),
+						status: LeafStatus::Fresh,
 					},
 				].as_ref().into(),
 				..Default::default()
@@ -606,8 +629,8 @@ fn do_not_send_empty_leaves_update_on_block_finalization() {
 					ActivatedLeaf {
 						hash: imported_block.hash,
 						number: imported_block.number,
+						span: Arc::new(jaeger::Span::Disabled),
 						status: LeafStatus::Fresh,
-						span: Arc::new(jaeger::Span::Disabled)
 					}
 				].as_ref().into(),
 				..Default::default()
@@ -641,146 +664,6 @@ fn do_not_send_empty_leaves_update_on_block_finalization() {
 	});
 }
 
-// Tests that duplicate leaves have an attached 'Stale' status.
-#[test]
-fn overseer_stale_detection() {
-	let spawner = sp_core::testing::TaskExecutor::new();
-
-	executor::block_on(async move {
-		let a1_hash = [1; 32].into();
-		let b1_hash = [2; 32].into();
-
-		let a2_hash = [3; 32].into();
-		let b2_hash = [4; 32].into();
-
-		let first_block = BlockInfo {
-			hash: a1_hash,
-			parent_hash: [0; 32].into(),
-			number: 1,
-		};
-		let second_block = BlockInfo {
-			hash: b1_hash,
-			parent_hash: [0; 32].into(),
-			number: 1,
-		};
-
-		let third_block = BlockInfo {
-			hash: a2_hash,
-			parent_hash: a1_hash,
-			number: 2,
-		};
-
-		let fourth_block = BlockInfo {
-			hash: b2_hash,
-			parent_hash: b1_hash,
-			number: 2,
-		};
-
-		let (tx_5, mut rx_5) = metered::channel(64);
-		let (tx_6, mut rx_6) = metered::channel(64);
-		let all_subsystems = AllSubsystems::<()>::dummy()
-			.replace_candidate_validation(TestSubsystem5(tx_5))
-			.replace_candidate_backing(TestSubsystem6(tx_6));
-
-		let (overseer, mut handler) = Overseer::new(
-			vec![first_block.clone()],
-			all_subsystems,
-			None,
-			MockSupportsParachains,
-			spawner,
-		).unwrap();
-
-		let overseer_fut = overseer.run().fuse();
-		pin_mut!(overseer_fut);
-
-		let mut ss5_results = Vec::new();
-		let mut ss6_results = Vec::new();
-
-		handler.block_imported(second_block.clone()).await;
-
-		// import the second block of each chain to deactivate the heads.
-		handler.block_imported(third_block).await;
-		handler.block_imported(fourth_block).await;
-
-		// import the first blocks again (emulating a revert)
-		handler.block_imported(first_block).await;
-		handler.block_imported(second_block).await;
-
-		let expected_heartbeats = vec![
-			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf {
-				hash: a1_hash,
-				number: 1,
-				status: LeafStatus::Fresh,
-				span: Arc::new(jaeger::Span::Disabled),
-			})),
-			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf {
-				hash: b1_hash,
-				number: 1,
-				status: LeafStatus::Fresh,
-				span: Arc::new(jaeger::Span::Disabled),
-			})),
-			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
-				activated: [ActivatedLeaf {
-					hash: a2_hash,
-					number: 2,
-					status: LeafStatus::Fresh,
-					span: Arc::new(jaeger::Span::Disabled),
-				}].as_ref().into(),
-				deactivated: [a1_hash].as_ref().into(),
-			}),
-			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
-				activated: [ActivatedLeaf {
-					hash: b2_hash,
-					number: 2,
-					status: LeafStatus::Fresh,
-					span: Arc::new(jaeger::Span::Disabled),
-				}].as_ref().into(),
-				deactivated: [b1_hash].as_ref().into(),
-			}),
-			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf {
-				hash: a1_hash,
-				number: 1,
-				status: LeafStatus::Stale,
-				span: Arc::new(jaeger::Span::Disabled),
-			})),
-			OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(ActivatedLeaf {
-				hash: b1_hash,
-				number: 1,
-				status: LeafStatus::Stale,
-				span: Arc::new(jaeger::Span::Disabled),
-			})),
-		];
-
-		loop {
-			select! {
-				res = overseer_fut => {
-					assert!(res.is_ok());
-					break;
-				},
-				res = rx_5.next() => {
-					if let Some(res) = res {
-						ss5_results.push(res);
-					}
-				}
-				res = rx_6.next() => {
-					if let Some(res) = res {
-						ss6_results.push(res);
-					}
-				}
-				complete => break,
-			}
-
-			if ss5_results.len() == expected_heartbeats.len() &&
-				ss6_results.len() == expected_heartbeats.len() {
-					handler.stop().await;
-			}
-		}
-
-		assert_eq!(ss5_results, expected_heartbeats);
-		assert_eq!(ss6_results, expected_heartbeats);
-	});
-}
-
 #[derive(Clone)]
 struct CounterSubsystem {
 	stop_signals_received: Arc<atomic::AtomicUsize>,
@@ -802,10 +685,10 @@ impl CounterSubsystem {
 	}
 }
 
-impl<C, M> Subsystem<C> for CounterSubsystem
-	where
-		C: SubsystemContext<Message=M>,
-		M: Send,
+impl<C, M> Subsystem<C, SubsystemError> for CounterSubsystem
+where
+	C: overseer::SubsystemContext<Message=M,Signal=OverseerSignal,AllMessages=AllMessages>,
+	M: Send,
 {
 	fn start(self, mut ctx: C) -> SpawnedSubsystem {
 		SpawnedSubsystem {
@@ -1112,7 +995,7 @@ fn context_holds_onto_message_until_enough_signals_received() {
 	let (unbounded_tx, unbounded_rx) = metered::unbounded();
 	let (to_overseer_tx, _to_overseer_rx) = metered::unbounded();
 
-	let mut ctx = OverseerSubsystemContext::<()>::new_unmetered(
+	let mut ctx = OverseerSubsystemContext::new(
 		signal_rx,
 		stream::select(bounded_rx, unbounded_rx),
 		channels_out,
diff --git a/polkadot/node/overseer/subsystems-gen/tests/ui/ok-01-w-generics.rs b/polkadot/node/overseer/subsystems-gen/tests/ui/ok-01-w-generics.rs
deleted file mode 100644
index 1519990a0a5580bb5920508193e1491bf2cec7ff..0000000000000000000000000000000000000000
--- a/polkadot/node/overseer/subsystems-gen/tests/ui/ok-01-w-generics.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-#![allow(dead_code)]
-
-use polkadot_procmacro_overseer_subsystems_gen::AllSubsystemsGen;
-
-#[derive(Clone, AllSubsystemsGen)]
-struct AllSubsystems<A, B> {
-    a: A,
-    b: B,
-}
-
-fn main() {
-    let all = AllSubsystems::<u8, u16> {
-        a: 0u8,
-        b: 1u16,
-    };
-    let _all: AllSubsystems<_,_> = all.replace_a::<u32>(777_777u32);
-}
diff --git a/polkadot/node/service/src/grandpa_support.rs b/polkadot/node/service/src/grandpa_support.rs
index 407bedf671c00322341535dec07c9223bdb30669..7736ce3a18cfae5c996939be19e4b0a3f93d586e 100644
--- a/polkadot/node/service/src/grandpa_support.rs
+++ b/polkadot/node/service/src/grandpa_support.rs
@@ -27,7 +27,7 @@ use {
 	polkadot_primitives::v1::{Hash, Block as PolkadotBlock, Header as PolkadotHeader},
 	polkadot_subsystem::messages::ApprovalVotingMessage,
 	prometheus_endpoint::{self, Registry},
-	polkadot_overseer::OverseerHandler,
+	polkadot_overseer::Handle,
 	futures::channel::oneshot,
 };
 
@@ -41,13 +41,13 @@ use {
 #[derive(Clone)]
 pub(crate) struct ApprovalCheckingVotingRule {
 	checking_lag: Option<prometheus_endpoint::Gauge<prometheus_endpoint::U64>>,
-	overseer: OverseerHandler,
+	overseer: Handle,
 }
 
 #[cfg(feature = "full-node")]
 impl ApprovalCheckingVotingRule {
 	/// Create a new approval checking diagnostic voting rule.
-	pub fn new(overseer: OverseerHandler, registry: Option<&Registry>)
+	pub fn new(overseer: Handle, registry: Option<&Registry>)
 		-> Result<Self, prometheus_endpoint::PrometheusError>
 	{
 		Ok(ApprovalCheckingVotingRule {
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index 0484c2b9a0f5671febc07d56c8e3a3f639128942..50c2af707198b29d2f26550daa95760313d5a644 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -56,7 +56,7 @@ pub use {
 	sp_authority_discovery::AuthorityDiscoveryApi,
 	sc_client_api::AuxStore,
 	polkadot_primitives::v1::ParachainHost,
-	polkadot_overseer::{Overseer, OverseerHandler},
+	polkadot_overseer::{Overseer, Handle},
 };
 pub use sp_core::traits::SpawnNamed;
 
@@ -427,7 +427,7 @@ fn new_partial<RuntimeApi, Executor>(
 pub struct NewFull<C> {
 	pub task_manager: TaskManager,
 	pub client: C,
-	pub overseer_handler: Option<OverseerHandler>,
+	pub overseer_handler: Option<Handle>,
 	pub network: Arc<sc_network::NetworkService<Block, <Block as BlockT>::Hash>>,
 	pub rpc_handlers: RpcHandlers,
 	pub backend: Arc<FullBackend>,
diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs
index 968240074b8dd3fd9af2ec9ea5156241450a914e..9c2081538f50adc92751777fdf0b433397463503 100644
--- a/polkadot/node/service/src/overseer.rs
+++ b/polkadot/node/service/src/overseer.rs
@@ -28,7 +28,7 @@ use polkadot_network_bridge::RequestMultiplexer;
 use polkadot_node_core_av_store::Config as AvailabilityConfig;
 use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig;
 use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig;
-use polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler};
+use polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, Handle};
 use polkadot_primitives::v1::ParachainHost;
 use sc_authority_discovery::Service as AuthorityDiscoveryService;
 use sp_api::ProvideRuntimeApi;
@@ -237,7 +237,7 @@ where
 /// would do.
 pub trait OverseerGen {
 	/// Overwrite the full generation of the overseer, including the subsystems.
-	fn generate<'a, Spawner, RuntimeClient>(&self, args: OverseerGenArgs<'a, Spawner, RuntimeClient>) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandler), Error>
+	fn generate<'a, Spawner, RuntimeClient>(&self, args: OverseerGenArgs<'a, Spawner, RuntimeClient>) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, Handle), Error>
 	where
 		RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
 		RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
@@ -256,7 +256,7 @@ pub struct RealOverseerGen;
 impl OverseerGen for RealOverseerGen {
 	fn generate<'a, Spawner, RuntimeClient>(&self,
 		args : OverseerGenArgs<'a, Spawner, RuntimeClient>
-	) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandler), Error>
+	) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, Handle), Error>
 	where
 		RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
 		RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
diff --git a/polkadot/node/service/src/relay_chain_selection.rs b/polkadot/node/service/src/relay_chain_selection.rs
index cca90e6ae1a9754de5252cf6eaa1e33161d132f0..eecbc4b5eda1147250af1ea783dbe2ca2a84c280 100644
--- a/polkadot/node/service/src/relay_chain_selection.rs
+++ b/polkadot/node/service/src/relay_chain_selection.rs
@@ -41,7 +41,7 @@ use {
 	},
 	polkadot_subsystem::messages::{ApprovalVotingMessage, ChainSelectionMessage},
 	polkadot_node_subsystem_util::metrics::{self, prometheus},
-	polkadot_overseer::OverseerHandler,
+	polkadot_overseer::Handle,
 	futures::channel::oneshot,
 	consensus_common::{Error as ConsensusError, SelectChain},
 	sp_blockchain::HeaderBackend,
@@ -111,7 +111,7 @@ impl Metrics {
 /// A chain-selection implementation which provides safety for relay chains.
 pub struct SelectRelayChain<B> {
 	backend: Arc<B>,
-	overseer: OverseerHandler,
+	overseer: Handle,
 	// A fallback to use in case the overseer is disconnected.
 	//
 	// This is used on relay chains which have not yet enabled
@@ -126,7 +126,7 @@ impl<B> SelectRelayChain<B>
 	/// Create a new [`SelectRelayChain`] wrapping the given chain backend
 	/// and a handle to the overseer.
 	#[allow(unused)]
-	pub fn new(backend: Arc<B>, overseer: OverseerHandler, metrics: Metrics) -> Self {
+	pub fn new(backend: Arc<B>, overseer: Handle, metrics: Metrics) -> Self {
 		SelectRelayChain {
 			fallback: sc_consensus::LongestChain::new(backend.clone()),
 			backend,
@@ -172,7 +172,7 @@ impl<B> SelectRelayChain<B> {
 	#[allow(unused)]
 	pub fn connect_overseer_handler(
 		&mut self,
-		other_handler: &OverseerHandler,
+		other_handler: &Handle,
 	) {
 		other_handler.connect_other(&mut self.overseer);
 	}
diff --git a/polkadot/node/subsystem-test-helpers/src/lib.rs b/polkadot/node/subsystem-test-helpers/src/lib.rs
index cd3550678c1120392d27328994b34299e75ce790..87b28d3963e4e1f474acae5fc57c934d22201a80 100644
--- a/polkadot/node/subsystem-test-helpers/src/lib.rs
+++ b/polkadot/node/subsystem-test-helpers/src/lib.rs
@@ -18,10 +18,11 @@
 
 #![warn(missing_docs)]
 
-use polkadot_node_subsystem::messages::AllMessages;
 use polkadot_node_subsystem::{
-	FromOverseer, SubsystemContext, SubsystemError, SubsystemResult, Subsystem,
-	SpawnedSubsystem, OverseerSignal, SubsystemSender,
+	messages::AllMessages,
+	overseer,
+	FromOverseer, SubsystemContext, SubsystemError, SubsystemResult,
+	SpawnedSubsystem, OverseerSignal,
 };
 use polkadot_node_subsystem_util::TimeoutExt;
 
@@ -172,7 +173,7 @@ pub fn sender_receiver() -> (TestSubsystemSender, mpsc::UnboundedReceiver<AllMes
 }
 
 #[async_trait::async_trait]
-impl SubsystemSender for TestSubsystemSender {
+impl overseer::SubsystemSender<AllMessages> for TestSubsystemSender {
 	async fn send_message(&mut self, msg: AllMessages) {
 		self.tx
 			.send(msg)
@@ -205,11 +206,18 @@ pub struct TestSubsystemContext<M, S> {
 }
 
 #[async_trait::async_trait]
-impl<M: Send + 'static, S: SpawnNamed + Send + 'static> SubsystemContext
+impl<M, S> overseer::SubsystemContext
 	for TestSubsystemContext<M, S>
+where
+	M: std::fmt::Debug + Send + 'static,
+	AllMessages: From<M>,
+	S: SpawnNamed + Send + 'static,
 {
 	type Message = M;
 	type Sender = TestSubsystemSender;
+	type Signal = OverseerSignal;
+	type AllMessages = AllMessages;
+	type Error = SubsystemError;
 
 	async fn try_recv(&mut self) -> Result<Option<FromOverseer<M>>, ()> {
 		match poll!(self.rx.next()) {
@@ -333,10 +341,14 @@ pub fn subsystem_test_harness<M, OverseerFactory, Overseer, TestFactory, Test>(
 /// channel.
 ///
 /// This subsystem is useful for testing functionality that interacts with the overseer.
-pub struct ForwardSubsystem<Msg>(pub mpsc::Sender<Msg>);
+pub struct ForwardSubsystem<M>(pub mpsc::Sender<M>);
 
-impl<C: SubsystemContext<Message = Msg>, Msg: Send + 'static> Subsystem<C> for ForwardSubsystem<Msg> {
-	fn start(mut self, mut ctx: C) -> SpawnedSubsystem {
+impl<M, Context> overseer::Subsystem<Context, SubsystemError> for ForwardSubsystem<M>
+where
+	M: std::fmt::Debug + Send + 'static,
+	Context: SubsystemContext<Message = M> + overseer::SubsystemContext<Message = M>,
+{
+	fn start(mut self, mut ctx: Context) -> SpawnedSubsystem {
 		let future = Box::pin(async move {
 			loop {
 				match ctx.recv().await {
diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..538a8ebab956948f46f920edff2adf5d53e754d4
--- /dev/null
+++ b/polkadot/node/subsystem-types/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "polkadot-node-subsystem-types"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+description = "Subsystem traits and message definitions"
+
+[dependencies]
+async-std = "1.8.0"
+async-trait = "0.1.42"
+derive_more = "0.99.11"
+futures = "0.3.12"
+futures-timer = "3.0.2"
+mick-jaeger = "0.1.2"
+lazy_static = "1.4"
+tracing = "0.1.26"
+parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
+parking_lot = "0.11.1"
+pin-project = "1.0.4"
+polkadot-primitives = { path = "../../primitives" }
+polkadot-node-primitives = { path = "../primitives" }
+polkadot-node-network-protocol = { path = "../network/protocol" }
+polkadot-statement-table = { path = "../../statement-table" }
+polkadot-node-jaeger = { path = "../jaeger" }
+polkadot-overseer-gen = { path = "../overseer/overseer-gen" }
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+smallvec = "1.6.1"
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" }
+thiserror = "1.0.23"
+log = "0.4.13"
+
+[dev-dependencies]
+assert_matches = "1.4.0"
+async-trait = "0.1.42"
+futures = { version = "0.3.12", features = ["thread-pool"] }
+polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
diff --git a/polkadot/node/subsystem/src/errors.rs b/polkadot/node/subsystem-types/src/errors.rs
similarity index 52%
rename from polkadot/node/subsystem/src/errors.rs
rename to polkadot/node/subsystem-types/src/errors.rs
index acd33cff1dfb2b84daac96fc569ea6c450dbd94c..fffdc04ac1311618cfcf3d30fb06d1ef4bbe4992 100644
--- a/polkadot/node/subsystem/src/errors.rs
+++ b/polkadot/node/subsystem-types/src/errors.rs
@@ -16,6 +16,9 @@
 
 //! Error types for the subsystem requests.
 
+
+use crate::JaegerError;
+
 /// A description of an error causing the runtime API request to be unservable.
 #[derive(Debug, Clone)]
 pub struct RuntimeApiError(String);
@@ -77,3 +80,70 @@ impl std::fmt::Display for RecoveryError {
 }
 
 impl std::error::Error for RecoveryError {}
+
+/// An error type that describes faults that may happen.
+///
+/// These are:
+///   * Channels being closed
+///   * Subsystems dying when they are not expected to
+///   * Subsystems not dying when they are told to die
+///   * etc.
+#[derive(thiserror::Error, Debug)]
+#[allow(missing_docs)]
+pub enum SubsystemError {
+	#[error(transparent)]
+	NotifyCancellation(#[from] futures::channel::oneshot::Canceled),
+
+	#[error(transparent)]
+	QueueError(#[from] futures::channel::mpsc::SendError),
+
+	#[error(transparent)]
+	Io(#[from] std::io::Error),
+
+	#[error(transparent)]
+	Infallible(#[from] std::convert::Infallible),
+
+	#[error(transparent)]
+	Prometheus(#[from] substrate_prometheus_endpoint::PrometheusError),
+
+	#[error(transparent)]
+	Jaeger(#[from] JaegerError),
+
+	#[error("Failed to {0}")]
+	Context(String),
+
+	#[error("Subsystem stalled: {0}")]
+	SubsystemStalled(&'static str),
+
+	/// Generated by the `#[overseer(..)]` proc-macro
+	#[error(transparent)]
+	Generated(#[from] ::polkadot_overseer_gen::OverseerError),
+
+	/// Per origin (or subsystem) annotations to wrap an error.
+	#[error("Error originated in {origin}")]
+	FromOrigin {
+		/// An additional annotation tag for the origin of `source`.
+		origin: &'static str,
+		/// The wrapped error. Marked as source for tracking the error chain.
+		#[source] source: Box<dyn 'static + std::error::Error + Send + Sync>
+	},
+}
+
+impl SubsystemError {
+	/// Adds a `str` as `origin` to the given error `err`.
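+	///
+	/// For example, `SubsystemError::with_origin("availability-store", err)` wraps
+	/// `err` while recording which part of the node it originated from; any static
+	/// string works as the tag.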
+	pub fn with_origin<E: 'static + Send + Sync + std::error::Error>(origin: &'static str, err: E) -> Self {
+		Self::FromOrigin { origin, source: Box::new(err) }
+	}
+}
+
+/// Ease the use of subsystem errors.
+pub type SubsystemResult<T> = Result<T, self::SubsystemError>;
diff --git a/polkadot/node/subsystem-types/src/lib.rs b/polkadot/node/subsystem-types/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f3d350898c06bacce167802dec21086c3ba00958
--- /dev/null
+++ b/polkadot/node/subsystem-types/src/lib.rs
@@ -0,0 +1,152 @@
+// Copyright 2017-2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Subsystem trait definitions and message types.
+//!
+//! Node-side logic for Polkadot is mostly comprised of Subsystems, which are discrete components
+//! that communicate via message-passing. They are coordinated by an overseer, provided by a
+//! separate crate.
+
+#![warn(missing_docs)]
+
+use std::{sync::Arc, fmt};
+
+pub use polkadot_primitives::v1::{Hash, BlockNumber};
+use smallvec::SmallVec;
+
+pub mod errors;
+pub mod messages;
+
+pub use polkadot_node_jaeger as jaeger;
+pub use jaeger::*;
+
+/// How many slots are stack-reserved for active leaves updates.
+///
+/// If there are fewer leaves than this, some of the reserved stack space is wasted.
+/// If there are more, the `SmallVec` spills over to a heap allocation.
+const ACTIVE_LEAVES_SMALLVEC_CAPACITY: usize = 8;
+
+
+/// The status of an activated leaf.
+#[derive(Debug, Clone)]
+pub enum LeafStatus {
+	/// A leaf is fresh when it's the first time the leaf has been encountered.
+	/// Most leaves should be fresh.
+	Fresh,
+	/// A leaf is stale when it's encountered for a subsequent time. This will happen
+	/// when the chain is reverted or the fork-choice rule abandons some chain.
+	Stale,
+}
+
+impl LeafStatus {
+	/// Returns a bool indicating fresh status.
+	pub fn is_fresh(&self) -> bool {
+		match *self {
+			LeafStatus::Fresh => true,
+			LeafStatus::Stale => false,
+		}
+	}
+
+	/// Returns a bool indicating stale status.
+	pub fn is_stale(&self) -> bool {
+		match *self {
+			LeafStatus::Fresh => false,
+			LeafStatus::Stale => true,
+		}
+	}
+}
+
+/// Activated leaf.
+#[derive(Debug, Clone)]
+pub struct ActivatedLeaf {
+	/// The block hash.
+	pub hash: Hash,
+	/// The block number.
+	pub number: BlockNumber,
+	/// The status of the leaf.
+	pub status: LeafStatus,
+	/// An associated [`jaeger::Span`].
+	///
+	/// NOTE: Each span should only be kept active as long as the leaf is considered active and should be dropped
+	/// when the leaf is deactivated.
+	pub span: Arc<jaeger::Span>,
+}
+
+/// Changes in the set of active leaves: the parachain heads which we care to work on.
+///
+/// Note that the activated and deactivated fields indicate deltas, not complete sets.
+#[derive(Clone, Default)]
+pub struct ActiveLeavesUpdate {
+	/// New relay chain blocks of interest.
+	pub activated: SmallVec<[ActivatedLeaf; ACTIVE_LEAVES_SMALLVEC_CAPACITY]>,
+	/// Relay chain block hashes no longer of interest.
+	pub deactivated: SmallVec<[Hash; ACTIVE_LEAVES_SMALLVEC_CAPACITY]>,
+}
+
+impl ActiveLeavesUpdate {
+	/// Create an `ActiveLeavesUpdate` with a single activated leaf.
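+	///
+	/// A minimal example (using a default hash as a placeholder and a disabled jaeger span):
+	///
+	/// ```
+	/// # use polkadot_node_subsystem_types::{ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, jaeger};
+	/// # use std::sync::Arc;
+	/// let update = ActiveLeavesUpdate::start_work(ActivatedLeaf {
+	///     hash: Default::default(),
+	///     number: 1,
+	///     status: LeafStatus::Fresh,
+	///     span: Arc::new(jaeger::Span::Disabled),
+	/// });
+	/// assert!(!update.is_empty());
+	/// ```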
+	pub fn start_work(activated: ActivatedLeaf) -> Self {
+		Self { activated: [activated][..].into(), ..Default::default() }
+	}
+
+	/// Create an `ActiveLeavesUpdate` with a single deactivated hash.
+	pub fn stop_work(hash: Hash) -> Self {
+		Self { deactivated: [hash][..].into(), ..Default::default() }
+	}
+
+	/// Returns `true` if the update contains neither activated nor deactivated leaves.
+	pub fn is_empty(&self) -> bool {
+		self.activated.is_empty() && self.deactivated.is_empty()
+	}
+}
+
+impl PartialEq for ActiveLeavesUpdate {
+	/// Equality for `ActiveLeavesUpdate` doesn't imply bitwise equality.
+	///
+	/// Instead, it means equality when `activated` and `deactivated` are considered as sets.
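+	///
+	/// In particular, the order of entries does not matter, and neither do the
+	/// `status` and `span` fields attached to the activated leaves.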
+	fn eq(&self, other: &Self) -> bool {
+		self.activated.len() == other.activated.len() && self.deactivated.len() == other.deactivated.len()
+			&& self.activated.iter().all(|a| other.activated.iter().any(|o| a.hash == o.hash))
+			&& self.deactivated.iter().all(|a| other.deactivated.contains(a))
+	}
+}
+
+impl fmt::Debug for ActiveLeavesUpdate {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		struct Activated<'a>(&'a [ActivatedLeaf]);
+		impl fmt::Debug for Activated<'_> {
+			fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+				f.debug_list().entries(self.0.iter().map(|e| e.hash)).finish()
+			}
+		}
+
+		f.debug_struct("ActiveLeavesUpdate")
+			.field("activated", &Activated(&self.activated))
+			.field("deactivated", &self.deactivated)
+			.finish()
+	}
+}
+
+/// Signals sent by an overseer to a subsystem.
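+///
+/// A sketch of how a subsystem's main loop typically reacts to these signals
+/// (the surrounding receive loop and error type are assumed, not defined here):
+///
+/// ```ignore
+/// match signal {
+/// 	OverseerSignal::ActiveLeaves(update) => {
+/// 		// start work for `update.activated`, stop work for `update.deactivated`
+/// 	}
+/// 	OverseerSignal::BlockFinalized(_hash, number) => {
+/// 		// prune state up to block `number`
+/// 	}
+/// 	OverseerSignal::Conclude => return Ok(()),
+/// }
+/// ```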
+#[derive(PartialEq, Clone, Debug)]
+pub enum OverseerSignal {
+	/// Subsystems should adjust their jobs to start and stop work on appropriate block hashes.
+	ActiveLeaves(ActiveLeavesUpdate),
+	/// `Subsystem` is informed of a finalized block by its block hash and number.
+	BlockFinalized(Hash, BlockNumber),
+	/// Conclude the work of the `Overseer` and all `Subsystem`s.
+	Conclude,
+}
diff --git a/polkadot/node/subsystem/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
similarity index 90%
rename from polkadot/node/subsystem/src/messages.rs
rename to polkadot/node/subsystem-types/src/messages.rs
index f6171a8a3baf5c17fb2f902e6ece07a13e3b2e79..e820ab3569bbaceebd0c300c22140521d93b3ce0 100644
--- a/polkadot/node/subsystem/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -22,7 +22,6 @@
 //!
 //! Subsystems' APIs are defined separately from their implementation, leading to easier mocking.
 
-use std::{collections::{BTreeMap, HashSet}, sync::Arc};
 
 use futures::channel::{mpsc, oneshot};
 use thiserror::Error;
@@ -48,8 +47,11 @@ use polkadot_primitives::v1::{
 	SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
 	ValidatorSignature,
 };
-use polkadot_procmacro_subsystem_dispatch_gen::subsystem_dispatch_gen;
 use polkadot_statement_table::v1::Misbehavior;
+use std::{
+	collections::{BTreeMap, HashSet},
+	sync::Arc,
+};
 
 /// Network events as transmitted to other subsystems, wrapped in their message types.
 pub mod network_bridge_event;
@@ -800,69 +802,6 @@ pub enum ApprovalDistributionMessage {
 pub enum GossipSupportMessage {
 }
 
-/// A message type tying together all message types that are used across Subsystems.
-#[subsystem_dispatch_gen(NetworkBridgeEvent<protocol_v1::ValidationProtocol>)]
-#[derive(Debug, derive_more::From)]
-pub enum AllMessages {
-	/// Message for the validation subsystem.
-	#[skip]
-	CandidateValidation(CandidateValidationMessage),
-	/// Message for the candidate backing subsystem.
-	#[skip]
-	CandidateBacking(CandidateBackingMessage),
-	/// Message for the Chain API subsystem.
-	#[skip]
-	ChainApi(ChainApiMessage),
-	/// Message for the Collator Protocol subsystem.
-	#[skip]
-	CollatorProtocol(CollatorProtocolMessage),
-	/// Message for the statement distribution subsystem.
-	StatementDistribution(StatementDistributionMessage),
-	/// Message for the availability distribution subsystem.
-	#[skip]
-	AvailabilityDistribution(AvailabilityDistributionMessage),
-	/// Message for the availability recovery subsystem.
-	#[skip]
-	AvailabilityRecovery(AvailabilityRecoveryMessage),
-	/// Message for the bitfield distribution subsystem.
-	BitfieldDistribution(BitfieldDistributionMessage),
-	/// Message for the bitfield signing subsystem.
-	#[skip]
-	BitfieldSigning(BitfieldSigningMessage),
-	/// Message for the Provisioner subsystem.
-	#[skip]
-	Provisioner(ProvisionerMessage),
-	/// Message for the Runtime API subsystem.
-	#[skip]
-	RuntimeApi(RuntimeApiMessage),
-	/// Message for the availability store subsystem.
-	#[skip]
-	AvailabilityStore(AvailabilityStoreMessage),
-	/// Message for the network bridge subsystem.
-	#[skip]
-	NetworkBridge(NetworkBridgeMessage),
-	/// Message for the Collation Generation subsystem.
-	#[skip]
-	CollationGeneration(CollationGenerationMessage),
-	/// Message for the Approval Voting subsystem.
-	#[skip]
-	ApprovalVoting(ApprovalVotingMessage),
-	/// Message for the Approval Distribution subsystem.
-	ApprovalDistribution(ApprovalDistributionMessage),
-	/// Message for the Gossip Support subsystem.
-	#[skip]
-	GossipSupport(GossipSupportMessage),
-	/// Message for the dispute coordinator subsystem.
-	#[skip]
-	DisputeCoordinator(DisputeCoordinatorMessage),
-	/// Message for the dispute participation subsystem.
-	#[skip]
-	DisputeParticipation(DisputeParticipationMessage),
-	/// Message for the chain selection subsystem.
-	#[skip]
-	ChainSelection(ChainSelectionMessage),
-}
-
 impl From<IncomingRequest<req_res_v1::PoVFetchingRequest>> for AvailabilityDistributionMessage {
 	fn from(req: IncomingRequest<req_res_v1::PoVFetchingRequest>) -> Self {
 		Self::PoVFetchingRequest(req)
@@ -878,24 +817,3 @@ impl From<IncomingRequest<req_res_v1::CollationFetchingRequest>> for CollatorPro
 		Self::CollationFetchingRequest(req)
 	}
 }
-
-impl From<IncomingRequest<req_res_v1::PoVFetchingRequest>> for AllMessages {
-	fn from(req: IncomingRequest<req_res_v1::PoVFetchingRequest>) -> Self {
-		From::<AvailabilityDistributionMessage>::from(From::from(req))
-	}
-}
-impl From<IncomingRequest<req_res_v1::ChunkFetchingRequest>> for AllMessages {
-	fn from(req: IncomingRequest<req_res_v1::ChunkFetchingRequest>) -> Self {
-		From::<AvailabilityDistributionMessage>::from(From::from(req))
-	}
-}
-impl From<IncomingRequest<req_res_v1::CollationFetchingRequest>> for AllMessages {
-	fn from(req: IncomingRequest<req_res_v1::CollationFetchingRequest>) -> Self {
-		From::<CollatorProtocolMessage>::from(From::from(req))
-	}
-}
-impl From<IncomingRequest<req_res_v1::AvailableDataFetchingRequest>> for AllMessages {
-	fn from(req: IncomingRequest<req_res_v1::AvailableDataFetchingRequest>) -> Self {
-		From::<AvailabilityRecoveryMessage>::from(From::from(req))
-	}
-}
diff --git a/polkadot/node/subsystem/src/messages/network_bridge_event.rs b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs
similarity index 100%
rename from polkadot/node/subsystem/src/messages/network_bridge_event.rs
rename to polkadot/node/subsystem-types/src/messages/network_bridge_event.rs
index 3186e80e60b1dce1f68d76fe8b052c6ccf5e6c29..84f2d69479ae5939696e6a823de2caf83e29fb2d 100644
--- a/polkadot/node/subsystem/src/messages/network_bridge_event.rs
+++ b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs
@@ -70,14 +70,14 @@ impl<M> NetworkBridgeEvent<M> {
 		where T: 'a + Clone, &'a T: TryFrom<&'a M, Error = WrongVariant>
 	{
 		Ok(match *self {
+			NetworkBridgeEvent::PeerMessage(ref peer, ref msg)
+				=> NetworkBridgeEvent::PeerMessage(peer.clone(), <&'a T>::try_from(msg)?.clone()),
 			NetworkBridgeEvent::PeerConnected(ref peer, ref role, ref authority_id)
 				=> NetworkBridgeEvent::PeerConnected(peer.clone(), role.clone(), authority_id.clone()),
 			NetworkBridgeEvent::PeerDisconnected(ref peer)
 				=> NetworkBridgeEvent::PeerDisconnected(peer.clone()),
 			NetworkBridgeEvent::NewGossipTopology(ref peers)
 				=> NetworkBridgeEvent::NewGossipTopology(peers.clone()),
-			NetworkBridgeEvent::PeerMessage(ref peer, ref msg)
-				=> NetworkBridgeEvent::PeerMessage(peer.clone(), <&'a T>::try_from(msg)?.clone()),
 			NetworkBridgeEvent::PeerViewChange(ref peer, ref view)
 				=> NetworkBridgeEvent::PeerViewChange(peer.clone(), view.clone()),
 			NetworkBridgeEvent::OurViewChange(ref view)
diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml
index 63bbeee1ab33d226558343391fd840ccd85edc13..f8414a1254b12b67881490575f4a60cac3e84585 100644
--- a/polkadot/node/subsystem-util/Cargo.toml
+++ b/polkadot/node/subsystem-util/Cargo.toml
@@ -19,10 +19,12 @@ tracing = "0.1.26"
 lru = "0.6.5"
 
 polkadot-node-primitives = { path = "../primitives" }
-polkadot-node-subsystem = { path = "../subsystem" }
+polkadot-node-subsystem = { package = "polkadot-node-subsystem", path = "../subsystem" }
 polkadot-node-jaeger = { path = "../jaeger" }
+polkadot-node-metrics = { path = "../metrics" }
 polkadot-node-network-protocol = { path = "../network/protocol" }
 polkadot-primitives = { path = "../../primitives" }
+polkadot-overseer = { path = "../overseer" }
 metered-channel = { path = "../metered-channel"}
 
 sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/node/subsystem-util/src/determine_new_blocks.rs b/polkadot/node/subsystem-util/src/determine_new_blocks.rs
index adfc614beef9afbdd55c56bad0b2584a1fff414d..9d47592d9741ce4189f25f23f72fdc1623cdf913 100644
--- a/polkadot/node/subsystem-util/src/determine_new_blocks.rs
+++ b/polkadot/node/subsystem-util/src/determine_new_blocks.rs
@@ -17,7 +17,10 @@
 //! A utility for fetching all unknown blocks based on a new chain-head hash.
 
 use polkadot_node_subsystem::{
-	messages::ChainApiMessage, SubsystemSender,
+	messages::ChainApiMessage,
+};
+use polkadot_node_subsystem::{
+	SubsystemSender,
 };
 use polkadot_primitives::v1::{Hash, Header, BlockNumber};
 use futures::prelude::*;
@@ -34,13 +37,15 @@ use futures::channel::oneshot;
 /// then the returned list will be empty.
 ///
 /// This may be somewhat expensive when first recovering from major sync.
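+///
+/// A sketch of a typical call site (`Error`, `known_blocks`, `head_hash`, `head_header` and
+/// `finalized_number` are placeholders for illustration):
+///
+/// ```ignore
+/// let new_blocks = determine_new_blocks(
+/// 	ctx.sender(),
+/// 	|hash| Ok::<_, Error>(known_blocks.contains(hash)),
+/// 	head_hash,
+/// 	&head_header,
+/// 	finalized_number,
+/// ).await?;
+/// ```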
-pub async fn determine_new_blocks<E>(
-	ctx: &mut impl SubsystemSender,
+pub async fn determine_new_blocks<E, Sender>(
+	sender: &mut Sender,
 	is_known: impl Fn(&Hash) -> Result<bool, E>,
 	head: Hash,
 	header: &Header,
 	lower_bound_number: BlockNumber,
-) -> Result<Vec<(Hash, Header)>, E> {
+) -> Result<Vec<(Hash, Header)>, E> where
+	Sender: SubsystemSender,
+{
 	const ANCESTRY_STEP: usize = 4;
 
 	let min_block_needed = lower_bound_number + 1;
@@ -87,7 +92,7 @@ pub async fn determine_new_blocks<E>(
 		let batch_hashes = if ancestry_step == 1 {
 			vec![last_header.parent_hash]
 		} else {
-			ctx.send_message(ChainApiMessage::Ancestors {
+			sender.send_message(ChainApiMessage::Ancestors {
 				hash: *last_hash,
 				k: ancestry_step,
 				response_channel: tx,
@@ -105,8 +110,8 @@ pub async fn determine_new_blocks<E>(
 				.map(|_| oneshot::channel())
 				.unzip::<_, _, Vec<_>, Vec<_>>();
 
-			for (hash, sender) in batch_hashes.iter().cloned().zip(batch_senders) {
-				ctx.send_message(ChainApiMessage::BlockHeader(hash, sender).into()).await;
+			for (hash, batched_sender) in batch_hashes.iter().cloned().zip(batch_senders) {
+				sender.send_message(ChainApiMessage::BlockHeader(hash, batched_sender).into()).await;
 			}
 
 			let mut requests = futures::stream::FuturesOrdered::new();
@@ -156,7 +161,7 @@ mod tests {
 	use super::*;
 	use std::collections::{HashSet, HashMap};
 	use sp_core::testing::TaskExecutor;
-	use polkadot_node_subsystem::{messages::AllMessages, SubsystemContext};
+	use polkadot_overseer::{AllMessages, SubsystemContext};
 	use polkadot_node_subsystem_test_helpers::make_subsystem_context;
 	use assert_matches::assert_matches;
 
@@ -606,7 +611,7 @@ mod tests {
 				}
 			);
 
-			for _ in 0..2 {
+			for _ in 0_u8..2 {
 				assert_matches!(
 					handle.recv().await,
 					AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs
index 85348b1457c9083786a2da044d228837d14ab915..096957167b455b588ccc0b00aec273a69367d29b 100644
--- a/polkadot/node/subsystem-util/src/lib.rs
+++ b/polkadot/node/subsystem-util/src/lib.rs
@@ -25,21 +25,46 @@
 #![warn(missing_docs)]
 
 use polkadot_node_subsystem::{
+	overseer,
 	errors::RuntimeApiError,
-	messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender, BoundToRelayParent},
-	FromOverseer, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemError, SubsystemSender,
+	messages::{
+		AllMessages,
+		RuntimeApiMessage,
+		RuntimeApiRequest,
+		RuntimeApiSender,
+		BoundToRelayParent,
+	},
 	ActiveLeavesUpdate, OverseerSignal,
+	SubsystemSender,
+	errors::{
+		SubsystemError,
+	},
+	SubsystemContext,
+	SpawnedSubsystem,
+	FromOverseer,
 };
+
+pub use overseer::{
+	Subsystem,
+	TimeoutExt,
+	gen::OverseerError,
+	gen::Timeout,
+};
+
+pub use polkadot_node_metrics::{
+	Metronome,
+	metrics,
+};
+
 use polkadot_node_jaeger as jaeger;
 use futures::{channel::{mpsc, oneshot}, prelude::*, select, stream::{Stream, SelectAll}};
-use futures_timer::Delay;
 use parity_scale_codec::Encode;
 use pin_project::pin_project;
 use polkadot_primitives::v1::{
 	CandidateEvent, CommittedCandidateReceipt, CoreState, EncodeAs, PersistedValidationData,
 	GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption,
 	SessionIndex, Signed, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, SessionInfo,
-	AuthorityDiscoveryId, GroupIndex,
+	AuthorityDiscoveryId, GroupIndex,
 };
 use sp_core::{traits::SpawnNamed, Public};
 use sp_application_crypto::AppKey;
@@ -59,8 +84,8 @@ pub use error_handling::{Fault, unwrap_non_fatal};
 
 /// These reexports are required so that external crates can use the `delegated_subsystem` macro properly.
 pub mod reexports {
-	pub use sp_core::traits::SpawnNamed;
-	pub use polkadot_node_subsystem::{
+	pub use polkadot_overseer::gen::{
+		SpawnNamed,
 		SpawnedSubsystem,
 		Subsystem,
 		SubsystemContext,
@@ -112,6 +137,12 @@ pub enum Error {
 	AlreadyForwarding,
 }
 
+impl From<OverseerError> for Error {
+	fn from(e: OverseerError) -> Self {
+		Self::from(SubsystemError::from(e))
+	}
+}
+
 /// A type alias for Runtime API receivers.
 pub type RuntimeApiReceiver<T> = oneshot::Receiver<Result<T, RuntimeApiError>>;
 
@@ -148,13 +179,14 @@ macro_rules! specialize_requests {
 		#[doc = "Request `"]
 		#[doc = $doc_name]
 		#[doc = "` from the runtime"]
-		pub async fn $func_name(
+		pub async fn $func_name (
 			parent: Hash,
 			$(
 				$param_name: $param_ty,
 			)*
 			sender: &mut impl SubsystemSender,
-		) -> RuntimeApiReceiver<$return_ty> {
+		) -> RuntimeApiReceiver<$return_ty>
+		{
 			request_from_runtime(parent, sender, |tx| RuntimeApiRequest::$request_variant(
 				$( $param_name, )* tx
 			)).await
@@ -268,7 +300,8 @@ impl Validator {
 		parent: Hash,
 		keystore: SyncCryptoStorePtr,
 		sender: &mut impl SubsystemSender,
-	) -> Result<Self, Error> {
+	) -> Result<Self, Error>
+	{
 		// Note: request_validators and request_session_index_for_child do not and cannot
 		// run concurrently: they both have a mutable handle to the same sender.
 		// However, each of them returns a oneshot::Receiver, and those are resolved concurrently.
@@ -352,41 +385,6 @@ impl<ToJob> JobHandle<ToJob> {
 	}
 }
 
-/// This module reexports Prometheus types and defines the [`Metrics`] trait.
-pub mod metrics {
-	/// Reexport Substrate Prometheus types.
-	pub use substrate_prometheus_endpoint as prometheus;
-
-
-	/// Subsystem- or job-specific Prometheus metrics.
-	///
-	/// Usually implemented as a wrapper for `Option<ActualMetrics>`
-	/// to ensure `Default` bounds or as a dummy type ().
-	/// Prometheus metrics internally hold an `Arc` reference, so cloning them is fine.
-	pub trait Metrics: Default + Clone {
-		/// Try to register metrics in the Prometheus registry.
-		fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError>;
-
-		/// Convenience method to register metrics in the optional Promethius registry.
-		///
-		/// If no registry is provided, returns `Default::default()`. Otherwise, returns the same
-		/// thing that `try_register` does.
-		fn register(registry: Option<&prometheus::Registry>) -> Result<Self, prometheus::PrometheusError> {
-			match registry {
-				None => Ok(Self::default()),
-				Some(registry) => Self::try_register(registry),
-			}
-		}
-	}
-
-	// dummy impl
-	impl Metrics for () {
-		fn try_register(_registry: &prometheus::Registry) -> Result<(), prometheus::PrometheusError> {
-			Ok(())
-		}
-	}
-}
-
 /// Commands from a job to the broader subsystem.
 pub enum FromJobCommand {
 	/// Spawn a child task on the executor.
@@ -396,12 +394,22 @@ pub enum FromJobCommand {
 }
 
 /// A sender for messages from jobs, as well as commands to the overseer.
-#[derive(Clone)]
-pub struct JobSender<S> {
+pub struct JobSender<S: SubsystemSender> {
 	sender: S,
 	from_job: mpsc::Sender<FromJobCommand>,
 }
 
+// A custom clone impl, since `S` does not need to implement `Clone`
+// directly, which `#[derive(Clone)]` would require.
+impl<S: SubsystemSender> Clone for JobSender<S> {
+	fn clone(&self) -> Self {
+		Self {
+			sender: self.sender.clone(),
+			from_job: self.from_job.clone(),
+		}
+	}
+}
+
 impl<S: SubsystemSender> JobSender<S> {
 	/// Get access to the underlying subsystem sender.
 	pub fn subsystem_sender(&mut self) -> &mut S {
@@ -409,15 +417,17 @@ impl<S: SubsystemSender> JobSender<S> {
 	}
 
 	/// Send a direct message to some other `Subsystem`, routed based on message type.
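+	///
+	/// With the `impl Into<AllMessages>` bound, callers can pass the concrete subsystem
+	/// message directly, e.g. (sketch; `hash` and `tx` are placeholders):
+	///
+	/// ```ignore
+	/// job_sender.send_message(ChainApiMessage::BlockHeader(hash, tx)).await;
+	/// ```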
-	pub async fn send_message(&mut self, msg: AllMessages) {
-		self.sender.send_message(msg).await
+	pub async fn send_message(&mut self, msg: impl Into<AllMessages>) {
+		self.sender.send_message(msg.into()).await
 	}
 
 	/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
-	pub async fn send_messages<T>(&mut self, msgs: T)
-		where T: IntoIterator<Item = AllMessages> + Send, T::IntoIter: Send
+	pub async fn send_messages<T, M>(&mut self, msgs: T)
+	where
+		T: IntoIterator<Item = M> + Send, T::IntoIter: Send,
+		M: Into<AllMessages>,
 	{
-		self.sender.send_messages(msgs).await
+		self.sender.send_messages(msgs.into_iter().map(|m| m.into())).await
 	}
 
 
@@ -426,8 +436,8 @@ impl<S: SubsystemSender> JobSender<S> {
 	///
 	/// This function should be used only when there is some other bounding factor on the messages
 	/// sent with it. Otherwise, it risks a memory leak.
-	pub fn send_unbounded_message(&mut self, msg: AllMessages) {
-		self.sender.send_unbounded_message(msg)
+	pub fn send_unbounded_message(&mut self, msg: impl Into<AllMessages>) {
+		self.sender.send_unbounded_message(msg.into())
 	}
 
 	/// Send a command to the subsystem, to be relayed onwards to the overseer.
@@ -436,20 +446,25 @@ impl<S: SubsystemSender> JobSender<S> {
 	}
 }
 
+
 #[async_trait::async_trait]
-impl<S: SubsystemSender> SubsystemSender for JobSender<S> {
-	async fn send_message(&mut self, msg: AllMessages) {
-		self.sender.send_message(msg).await
+impl<S, M> overseer::SubsystemSender<M> for JobSender<S>
+where
+	M: Send + 'static + Into<AllMessages>,
+	S: SubsystemSender + Clone,
+{
+	async fn send_message(&mut self, msg: M) {
+		self.sender.send_message(msg.into()).await
 	}
 
 	async fn send_messages<T>(&mut self, msgs: T)
-		where T: IntoIterator<Item = AllMessages> + Send, T::IntoIter: Send
+		where T: IntoIterator<Item = M> + Send, T::IntoIter: Send
 	{
-		self.sender.send_messages(msgs).await
+		self.sender.send_messages(msgs.into_iter().map(|m| m.into())).await
 	}
 
-	fn send_unbounded_message(&mut self, msg: AllMessages) {
-		self.sender.send_unbounded_message(msg)
+	fn send_unbounded_message(&mut self, msg: M) {
+		self.sender.send_unbounded_message(msg.into())
 	}
 }
 
@@ -525,7 +540,11 @@ struct Jobs<Spawner, ToJob> {
 	outgoing_msgs: SelectAll<mpsc::Receiver<FromJobCommand>>,
 }
 
-impl<Spawner: SpawnNamed, ToJob: Send + 'static> Jobs<Spawner, ToJob> {
+impl<Spawner, ToJob> Jobs<Spawner, ToJob>
+where
+	Spawner: SpawnNamed,
+	ToJob: Send + 'static,
+{
 	/// Create a new Jobs manager which handles spawning appropriate jobs.
 	pub fn new(spawner: Spawner) -> Self {
 		Self {
@@ -544,7 +563,9 @@ impl<Spawner: SpawnNamed, ToJob: Send + 'static> Jobs<Spawner, ToJob> {
 		metrics: Job::Metrics,
 		sender: Sender,
 	)
-		where Job: JobTrait<ToJob = ToJob>, Sender: SubsystemSender,
+		where
+			Job: JobTrait<ToJob = ToJob>,
+			Sender: SubsystemSender,
 	{
 		let (to_job_tx, to_job_rx) = mpsc::channel(JOB_CHANNEL_CAPACITY);
 		let (from_job_tx, from_job_rx) = mpsc::channel(JOB_CHANNEL_CAPACITY);
@@ -664,11 +685,12 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
 	pub async fn run<Context>(self, mut ctx: Context)
 		where
 			Spawner: SpawnNamed + Send + Clone + Unpin + 'static,
-			Context: SubsystemContext,
+			Context: SubsystemContext<Message=<Job as JobTrait>::ToJob, Signal=OverseerSignal>,
+			<Context as SubsystemContext>::Sender: SubsystemSender,
 			Job: 'static + JobTrait + Send,
-			Job::RunArgs: Clone + Sync,
-			Job::ToJob: From<<Context as SubsystemContext>::Message> + Sync,
-			Job::Metrics: Sync,
+			<Job as JobTrait>::RunArgs: Clone + Sync,
+			<Job as JobTrait>::ToJob: Sync + From<<Context as polkadot_overseer::SubsystemContext>::Message>,
+			<Job as JobTrait>::Metrics: Sync,
 	{
 		let JobSubsystem {
 			params: JobSubsystemParams {
@@ -679,7 +701,7 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
 			..
 		} = self;
 
-		let mut jobs = Jobs::new(spawner);
+		let mut jobs = Jobs::<Spawner, Job::ToJob>::new(spawner);
 
 		loop {
 			select! {
@@ -690,7 +712,7 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
 							deactivated,
 						}))) => {
 							for activated in activated {
-								let sender: Context::Sender = ctx.sender().clone();
+								let sender = ctx.sender().clone();
 								jobs.spawn_job::<Job, _>(
 									activated.hash,
 									activated.span,
@@ -710,7 +732,7 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
 						}
 						Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(..))) => {}
 						Ok(FromOverseer::Communication { msg }) => {
-							if let Ok(to_job) = <Job::ToJob>::try_from(msg) {
+							if let Ok(to_job) = <<Context as SubsystemContext>::Message>::try_from(msg) {
 								jobs.send_msg(to_job.relay_parent(), to_job).await;
 							}
 						}
@@ -725,10 +747,12 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
 					}
 				}
 				outgoing = jobs.next() => {
+					// TODO: verify that the introduced `.await` is not a problem;
+					// it should only wait for the spawn to complete,
+					// not for anything beyond that.
 					let res = match outgoing.expect("the Jobs stream never ends; qed") {
 						FromJobCommand::Spawn(name, task) => ctx.spawn(name, task),
-						FromJobCommand::SpawnBlocking(name, task)
-							=> ctx.spawn_blocking(name, task),
+						FromJobCommand::SpawnBlocking(name, task) => ctx.spawn_blocking(name, task),
 					};
 
 					if let Err(e) = res {
@@ -741,13 +765,13 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
 	}
 }
 
-impl<Context, Job, Spawner> Subsystem<Context> for JobSubsystem<Job, Spawner>
+impl<Context, Job, Spawner> Subsystem<Context, SubsystemError> for JobSubsystem<Job, Spawner>
 where
 	Spawner: SpawnNamed + Send + Clone + Unpin + 'static,
-	Context: SubsystemContext,
+	Context: SubsystemContext<Message=Job::ToJob,Signal=OverseerSignal>,
 	Job: 'static + JobTrait + Send,
 	Job::RunArgs: Clone + Sync,
-	Job::ToJob: From<<Context as SubsystemContext>::Message> + Sync,
+	<Job as JobTrait>::ToJob: Sync + From<<Context as polkadot_overseer::SubsystemContext>::Message>,
 	Job::Metrics: Sync,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
@@ -762,101 +786,3 @@ where
 		}
 	}
 }
-
-/// A future that wraps another future with a `Delay` allowing for time-limited futures.
-#[pin_project]
-pub struct Timeout<F: Future> {
-	#[pin]
-	future: F,
-	#[pin]
-	delay: Delay,
-}
-
-/// Extends `Future` to allow time-limited futures.
-pub trait TimeoutExt: Future {
-	/// Adds a timeout of `duration` to the given `Future`.
-	/// Returns a new `Future`.
-	fn timeout(self, duration: Duration) -> Timeout<Self>
-	where
-		Self: Sized,
-	{
-		Timeout {
-			future: self,
-			delay: Delay::new(duration),
-		}
-	}
-}
-
-impl<F: Future> TimeoutExt for F {}
-
-impl<F: Future> Future for Timeout<F> {
-	type Output = Option<F::Output>;
-
-	fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {
-		let this = self.project();
-
-		if this.delay.poll(ctx).is_ready() {
-			return Poll::Ready(None);
-		}
-
-		if let Poll::Ready(output) = this.future.poll(ctx) {
-			return Poll::Ready(Some(output));
-		}
-
-		Poll::Pending
-	}
-}
-
-
-#[derive(Copy, Clone)]
-enum MetronomeState {
-	Snooze,
-	SetAlarm,
-}
-
-/// Create a stream of ticks with a defined cycle duration.
-pub struct Metronome {
-	delay: Delay,
-	period: Duration,
-	state: MetronomeState,
-}
-
-impl Metronome
-{
-	/// Create a new metronome source with a defined cycle duration.
-	pub fn new(cycle: Duration) -> Self {
-		let period = cycle.into();
-		Self {
-			period,
-			delay: Delay::new(period),
-			state: MetronomeState::Snooze,
-		}
-	}
-}
-
-impl futures::Stream for Metronome
-{
-	type Item = ();
-	fn poll_next(
-		mut self: Pin<&mut Self>,
-		cx: &mut Context<'_>
-	) -> Poll<Option<Self::Item>> {
-		loop {
-			match self.state {
-				MetronomeState::SetAlarm => {
-					let val = self.period.clone();
-					self.delay.reset(val);
-					self.state = MetronomeState::Snooze;
-				}
-				MetronomeState::Snooze => {
-					if !Pin::new(&mut self.delay).poll(cx).is_ready() {
-						break
-					}
-					self.state = MetronomeState::SetAlarm;
-					return Poll::Ready(Some(()));
-				}
-			}
-		}
-		Poll::Pending
-	}
-}
diff --git a/polkadot/node/subsystem-util/src/rolling_session_window.rs b/polkadot/node/subsystem-util/src/rolling_session_window.rs
index 1b857ee7893c64b97415c483ea4ce8bf4958e2c8..944fe0c05489a462d75f5872852b5c2b3d9d1670 100644
--- a/polkadot/node/subsystem-util/src/rolling_session_window.rs
+++ b/polkadot/node/subsystem-util/src/rolling_session_window.rs
@@ -20,10 +20,12 @@
 //! care about the state of particular blocks.
 
 use polkadot_primitives::v1::{Hash, Header, SessionInfo, SessionIndex};
+
 use polkadot_node_subsystem::{
-	SubsystemContext,
+	overseer,
 	messages::{RuntimeApiMessage, RuntimeApiRequest},
 	errors::RuntimeApiError,
+	SubsystemContext,
 };
 use futures::channel::oneshot;
 
@@ -147,7 +149,7 @@ impl RollingSessionWindow {
 	/// some backwards drift in session index is acceptable.
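+	///
+	/// Illustrative call site (sketch; `window`, `hash` and `header` are placeholders):
+	///
+	/// ```ignore
+	/// let update = window.cache_session_info_for_head(&mut ctx, hash, &header).await;
+	/// ```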
 	pub async fn cache_session_info_for_head(
 		&mut self,
-		ctx: &mut impl SubsystemContext,
+		ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 		block_hash: Hash,
 		block_header: &Header,
 	) -> Result<SessionWindowUpdate, SessionsUnavailable> {
@@ -162,7 +164,7 @@ impl RollingSessionWindow {
 			ctx.send_message(RuntimeApiMessage::Request(
 				if block_header.number == 0 { block_hash } else { block_header.parent_hash },
 				RuntimeApiRequest::SessionIndexForChild(s_tx),
-			).into()).await;
+			)).await;
 
 			match s_rx.await {
 				Ok(Ok(s)) => s,
@@ -263,7 +265,7 @@ impl RollingSessionWindow {
 }
 
 async fn load_all_sessions(
-	ctx: &mut impl SubsystemContext,
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	block_hash: Hash,
 	start: SessionIndex,
 	end_inclusive: SessionIndex,
@@ -274,7 +276,7 @@ async fn load_all_sessions(
 		ctx.send_message(RuntimeApiMessage::Request(
 			block_hash,
 			RuntimeApiRequest::SessionInfo(i, tx),
-		).into()).await;
+		)).await;
 
 		let session_info = match rx.await {
 			Ok(Ok(Some(s))) => s,
@@ -295,7 +297,7 @@ async fn load_all_sessions(
 mod tests {
 	use super::*;
 	use polkadot_node_subsystem_test_helpers::make_subsystem_context;
-	use polkadot_node_subsystem::messages::AllMessages;
+	use polkadot_node_subsystem::messages::{AllMessages, AvailabilityRecoveryMessage};
 	use sp_core::testing::TaskExecutor;
 	use assert_matches::assert_matches;
 
@@ -331,7 +333,7 @@ mod tests {
 		};
 
 		let pool = TaskExecutor::new();
-		let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+		let (mut ctx, mut handle) = make_subsystem_context::<AvailabilityRecoveryMessage, _>(pool.clone());
 
 		let hash = header.hash();
 
diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs
index 5b2180f6d249a5e553702ed6ebcdc6bc5ebb5f76..882ecd122cb7466ead9075bd738fcac09c2bd1b9 100644
--- a/polkadot/node/subsystem-util/src/runtime/mod.rs
+++ b/polkadot/node/subsystem-util/src/runtime/mod.rs
@@ -26,6 +26,7 @@ use sp_keystore::{CryptoStore, SyncCryptoStorePtr};
 use polkadot_primitives::v1::{CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, OccupiedCore, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidatorId, ValidatorIndex};
 use polkadot_node_subsystem::SubsystemContext;
 
+
 use crate::{
 	request_session_index_for_child, request_session_info,
 	request_availability_cores,
@@ -107,13 +108,11 @@ impl RuntimeInfo {
 	}
 
 	/// Get `ExtendedSessionInfo` by relay parent hash.
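+	///
+	/// Illustrative usage (sketch; `runtime_info` and `relay_parent` are placeholders):
+	///
+	/// ```ignore
+	/// let info = runtime_info.get_session_info(&mut ctx, relay_parent).await?;
+	/// let n_validators = info.session_info.validators.len();
+	/// ```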
-	pub async fn get_session_info<'a, Context>(
+	pub async fn get_session_info<'a>(
 		&'a mut self,
-		ctx: &mut Context,
+		ctx: &mut impl SubsystemContext,
 		parent: Hash,
 	) -> Result<&'a ExtendedSessionInfo>
-	where
-		Context: SubsystemContext,
 	{
 		let session_index = self.get_session_index(ctx, parent).await?;
 
@@ -124,14 +123,12 @@ impl RuntimeInfo {
 	///
 	/// `request_session_info` still requires the parent to be passed in, so we take the parent
 	/// in addition to the `SessionIndex`.
-	pub async fn get_session_info_by_index<'a, Context>(
+	pub async fn get_session_info_by_index<'a>(
 		&'a mut self,
-		ctx: &mut Context,
+		ctx: &mut impl SubsystemContext,
 		parent: Hash,
 		session_index: SessionIndex,
 	) -> Result<&'a ExtendedSessionInfo>
-	where
-		Context: SubsystemContext,
 	{
 		if !self.session_info_cache.contains(&session_index) {
 			let session_info =
@@ -225,7 +222,7 @@ pub fn check_signature<Payload, RealPayload>(
 	session_info: &SessionInfo,
 	relay_parent: Hash,
 	signed: UncheckedSigned<Payload, RealPayload>,
-) -> std::result::Result<Signed<Payload, RealPayload>, UncheckedSigned<Payload, RealPayload>> 
+) -> std::result::Result<Signed<Payload, RealPayload>, UncheckedSigned<Payload, RealPayload>>
 where
 	Payload: EncodeAs<RealPayload> + Clone,
 	RealPayload: Encode + Clone,
@@ -243,7 +240,7 @@ where
 
 /// Request availability cores from the runtime.
 pub async fn get_availability_cores<Context>(ctx: &mut Context, relay_parent: Hash)
-	-> Result<Vec<CoreState>> 
+	-> Result<Vec<CoreState>>
 	where
 		Context: SubsystemContext,
 {
@@ -276,8 +273,8 @@ where
 /// Get group rotation info based on the given relay_parent.
 pub async fn get_group_rotation_info<Context>(ctx: &mut Context, relay_parent: Hash)
 	-> Result<GroupRotationInfo>
-	where
-		Context: SubsystemContext
+where
+	Context: SubsystemContext,
 {
 	// We drop `groups` here as we don't need them, because of `RuntimeInfo`. Ideally we would not
 	// fetch them in the first place.
diff --git a/polkadot/node/subsystem-util/src/tests.rs b/polkadot/node/subsystem-util/src/tests.rs
index 10eb7436716d7ac8b43438bca38988de9af67006..828d47baed133ce42838f7762539ab12083a502e 100644
--- a/polkadot/node/subsystem-util/src/tests.rs
+++ b/polkadot/node/subsystem-util/src/tests.rs
@@ -75,7 +75,7 @@ impl JobTrait for FakeCollatorProtocolJob {
 				sender.send_message(CollatorProtocolMessage::Invalid(
 					Default::default(),
 					Default::default(),
-				).into()).await;
+				)).await;
 			}
 
 			// it isn't necessary to break run_loop into its own function,
diff --git a/polkadot/node/subsystem/Cargo.toml b/polkadot/node/subsystem/Cargo.toml
index 1300e2abc7dd30873d064f3587d41bd70293e649..e4b3e0cad504058ebf216630567b16e45b3467f2 100644
--- a/polkadot/node/subsystem/Cargo.toml
+++ b/polkadot/node/subsystem/Cargo.toml
@@ -3,35 +3,9 @@ name = "polkadot-node-subsystem"
 version = "0.1.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
-description = "Subsystem traits and message definitions"
+description = "Subsystem traits and message definitions and the generated overseer"
 
 [dependencies]
-async-std = "1.8.0"
-async-trait = "0.1.42"
-derive_more = "0.99.14"
-futures = "0.3.15"
-futures-timer = "3.0.2"
-mick-jaeger = "0.1.2"
-lazy_static = "1.4"
-tracing = "0.1.26"
-parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
-parking_lot = "0.11.1"
-pin-project = "1.0.7"
-polkadot-node-primitives = { path = "../primitives" }
-polkadot-node-network-protocol = { path = "../network/protocol" }
-polkadot-primitives = { path = "../../primitives" }
-polkadot-statement-table = { path = "../../statement-table" }
+polkadot-overseer = { path = "../overseer" }
+polkadot-node-subsystem-types = { path = "../subsystem-types" }
 polkadot-node-jaeger = { path = "../jaeger" }
-polkadot-procmacro-subsystem-dispatch-gen = { path = "dispatch-gen" }
-sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
-smallvec = "1.6.1"
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" }
-thiserror = "1.0.23"
-log = "0.4.13"
-
-[dev-dependencies]
-assert_matches = "1.4.0"
-async-trait = "0.1.42"
-futures = { version = "0.3.15", features = ["thread-pool"] }
-polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
diff --git a/polkadot/node/subsystem/dispatch-gen/Cargo.toml b/polkadot/node/subsystem/dispatch-gen/Cargo.toml
deleted file mode 100644
index b54c833dd4e35915d56bf0bf1126678c2bdb64ae..0000000000000000000000000000000000000000
--- a/polkadot/node/subsystem/dispatch-gen/Cargo.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-[package]
-name = "polkadot-procmacro-subsystem-dispatch-gen"
-version = "0.1.0"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
-description = "Small proc macro to create the distribution code for network events"
-
-[lib]
-proc-macro = true
-
-[dependencies]
-syn = { version = "1.0.60", features = ["full"] }
-quote = "1.0.9"
-proc-macro2 = "1.0.24"
-assert_matches = "1.5.0"
-
-[dev-dependencies]
-trybuild = "1.0.42"
diff --git a/polkadot/node/subsystem/dispatch-gen/src/lib.rs b/polkadot/node/subsystem/dispatch-gen/src/lib.rs
deleted file mode 100644
index 737712639cff4861f73a1ecbdcb314260b863d76..0000000000000000000000000000000000000000
--- a/polkadot/node/subsystem/dispatch-gen/src/lib.rs
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2021 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
-
-use proc_macro2::TokenStream;
-use quote::{quote, ToTokens};
-use std::fmt;
-use syn::{parse2, Error, Fields, FieldsNamed, FieldsUnnamed, Ident, ItemEnum, Path, Result, Type, Variant};
-
-#[proc_macro_attribute]
-pub fn subsystem_dispatch_gen(attr: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream {
-	let attr: TokenStream = attr.into();
-	let item: TokenStream = item.into();
-	let mut backup = item.clone();
-	impl_subsystem_dispatch_gen(attr.into(), item).unwrap_or_else(|err| {
-		backup.extend(err.to_compile_error());
-		backup
-	}).into()
-}
-
-/// An enum variant without base type.
-#[derive(Clone)]
-struct EnumVariantDispatchWithTy {
-	// enum ty name
-	ty: Ident,
-	// variant
-	variant: EnumVariantDispatch,
-}
-
-impl fmt::Debug for EnumVariantDispatchWithTy {
-	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-		write!(f, "{}::{:?}", self.ty, self.variant)
-	}
-}
-
-impl ToTokens for EnumVariantDispatchWithTy {
-	fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
-		if let Some(inner) = &self.variant.inner {
-			let enum_name = &self.ty;
-			let variant_name = &self.variant.name;
-
-			let quoted = quote! {
-				#enum_name::#variant_name(#inner::from(event))
-			};
-			quoted.to_tokens(tokens);
-		}
-	}
-}
-
-/// An enum variant without the base type, contains the relevant inner type.
-#[derive(Clone)]
-struct EnumVariantDispatch {
-	/// variant name
-	name: Ident,
-	/// The inner type for which a `From::from` impl is anticipated from the input type.
-	/// No code will be generated for this enum variant if `inner` is `None`.
-	inner: Option<Type>,
-}
-
-impl fmt::Debug for EnumVariantDispatch {
-	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-		write!(f, "{}(..)", self.name)
-	}
-}
-
-fn prepare_enum_variant(variant: &mut Variant) -> Result<EnumVariantDispatch> {
-	let skip = variant.attrs.iter().find(|attr| attr.path.is_ident("skip")).is_some();
-	variant.attrs = variant.attrs.iter().filter(|attr| !attr.path.is_ident("skip")).cloned().collect::<Vec<_>>();
-
-	let variant = variant.clone();
-	let span = variant.ident.span();
-	let inner = match variant.fields.clone() {
-		// look for one called inner
-		Fields::Named(FieldsNamed { brace_token: _, named }) if !skip => named
-			.iter()
-			.find_map(
-				|field| {
-					if let Some(ident) = &field.ident {
-						if ident == "inner" {
-							return Some(Some(field.ty.clone()))
-						}
-					}
-					None
-				},
-			)
-			.ok_or_else(|| {
-				Error::new(span, "To dispatch with struct enum variant, one element must named `inner`")
-			})?,
-
-		// technically, if it has no inner types we cound not require the #[skip] annotation, but better make it consistent
-		Fields::Unnamed(FieldsUnnamed { paren_token: _, unnamed }) if !skip => unnamed
-			.first()
-			.map(|field| Some(field.ty.clone()))
-			.ok_or_else(|| Error::new(span, "Must be annotated with skip, even if no inner types exist."))?,
-		_ if skip => None,
-		Fields::Unit => {
-			return Err(Error::new(
-				span,
-				"Must be annotated with #[skip].",
-			))
-		}
-		Fields::Unnamed(_) => {
-			return Err(Error::new(
-				span,
-				"Must be annotated with #[skip] or have in `inner` element which impls `From<_>`.",
-			))
-		}
-		Fields::Named(_) => {
-			return Err(Error::new(
-				span,
-				"Must be annotated with #[skip] or the first wrapped type must impl `From<_>`.",
-			))
-		}
-	};
-
-	Ok(EnumVariantDispatch { name: variant.ident, inner })
-}
-
-fn impl_subsystem_dispatch_gen(attr: TokenStream, item: TokenStream) -> Result<proc_macro2::TokenStream> {
-	let event_ty = parse2::<Path>(attr)?;
-
-	let mut ie = parse2::<ItemEnum>(item)?;
-
-	let message_enum = ie.ident.clone();
-	let variants = ie.variants.iter_mut().try_fold(Vec::<EnumVariantDispatchWithTy>::new(), |mut acc, variant| {
-		let variant = prepare_enum_variant(variant)?;
-		if variant.inner.is_some() {
-			acc.push(EnumVariantDispatchWithTy { ty: message_enum.clone(), variant })
-		}
-		Ok::<_, syn::Error>(acc)
-	})?;
-
-	let mut orig = ie.to_token_stream();
-
-	let msg = "Generated by #[subsystem_dispatch_gen] proc-macro.";
-
-	orig.extend(quote! {
-		impl #message_enum {
-			#[doc = #msg]
-			pub fn dispatch_iter(event: #event_ty) -> impl Iterator<Item=Self> + Send {
-				let mut iter = None.into_iter();
-
-				#(
-					let mut iter = iter.chain(std::iter::once(event.focus().ok().map(|event| {
-						#variants
-					})));
-				)*
-				iter.filter_map(|x| x)
-			}
-		}
-	});
-	Ok(orig)
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	#[test]
-	fn basic() {
-		let attr = quote! {
-			NetEvent<foo::Bar>
-		};
-
-		let item = quote! {
-			/// Documentation.
-			#[derive(Clone)]
-			enum AllMessages {
-
-				Sub1(Inner1),
-
-				#[skip]
-				/// D3
-				Sub3,
-
-				/// D4
-				#[skip]
-				Sub4(Inner2),
-
-				/// D2
-				Sub2(Inner2),
-			}
-		};
-
-		let output = impl_subsystem_dispatch_gen(attr, item).expect("Simple example always works. qed");
-		println!("//generated:");
-		println!("{}", output);
-	}
-
-	#[test]
-	fn ui() {
-		let t = trybuild::TestCases::new();
-		t.compile_fail("tests/ui/err-*.rs");
-		t.pass("tests/ui/ok-*.rs");
-	}
-}
diff --git a/polkadot/node/subsystem/dispatch-gen/tests/ui/err-01-missing-skip.rs b/polkadot/node/subsystem/dispatch-gen/tests/ui/err-01-missing-skip.rs
deleted file mode 100644
index 7248a7181e493203bb0a890ffc2e5fe8c8914565..0000000000000000000000000000000000000000
--- a/polkadot/node/subsystem/dispatch-gen/tests/ui/err-01-missing-skip.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-#![allow(dead_code)]
-
-use polkadot_procmacro_subsystem_dispatch_gen::subsystem_dispatch_gen;
-
-/// The event type in question.
-#[derive(Clone, Copy)]
-enum Event {
-	Smth,
-	Else,
-}
-
-impl Event {
-	fn focus(&self) -> std::result::Result<Inner, ()> {
-		unimplemented!("foo")
-	}
-}
-
-/// This should have a `From<Event>` impl but does not.
-#[derive(Clone)]
-enum Inner {
-	Foo,
-	Bar(Event),
-}
-
-#[subsystem_dispatch_gen(Event)]
-#[derive(Clone)]
-enum AllMessages {
-	/// Foo
-	Vvvvvv(Inner),
-
-    /// Missing a `#[skip]` annotation
-    Uuuuu,
-}
-
-fn main() {
-    let _x = AllMessages::dispatch_iter(Event::Else);
-}
diff --git a/polkadot/node/subsystem/dispatch-gen/tests/ui/err-01-missing-skip.stderr b/polkadot/node/subsystem/dispatch-gen/tests/ui/err-01-missing-skip.stderr
deleted file mode 100644
index 855521d2c4efc4f9fa283f1bed13ebf55a267eb1..0000000000000000000000000000000000000000
--- a/polkadot/node/subsystem/dispatch-gen/tests/ui/err-01-missing-skip.stderr
+++ /dev/null
@@ -1,14 +0,0 @@
-error: Must be annotated with #[skip].
-  --> $DIR/err-01-missing-skip.rs:32:5
-   |
-32 |     Uuuuu,
-   |     ^^^^^
-
-error[E0599]: no variant or associated item named `dispatch_iter` found for enum `AllMessages` in the current scope
-  --> $DIR/err-01-missing-skip.rs:36:27
-   |
-27 | enum AllMessages {
-   | ---------------- variant or associated item `dispatch_iter` not found here
-...
-36 |     let _x = AllMessages::dispatch_iter(Event::Else);
-   |                           ^^^^^^^^^^^^^ variant or associated item not found in `AllMessages`
diff --git a/polkadot/node/subsystem/dispatch-gen/tests/ui/err-02-missing-from.rs b/polkadot/node/subsystem/dispatch-gen/tests/ui/err-02-missing-from.rs
deleted file mode 100644
index a7abef2c87096c760ce27f80b195b8820ef16599..0000000000000000000000000000000000000000
--- a/polkadot/node/subsystem/dispatch-gen/tests/ui/err-02-missing-from.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-#![allow(dead_code)]
-
-use polkadot_procmacro_subsystem_dispatch_gen::subsystem_dispatch_gen;
-
-/// The event type in question.
-#[derive(Clone, Copy, Debug)]
-enum Event {
-	Smth,
-	Else,
-}
-
-impl Event {
-	fn focus(&self) -> std::result::Result<Intermediate, ()> {
-		Ok(Intermediate(self.clone()))
-	}
-}
-
-#[derive(Debug, Clone)]
-struct Intermediate(Event);
-
-
-/// This should have a `From<Event>` impl but does not.
-#[derive(Debug, Clone)]
-enum Inner {
-	Foo,
-	Bar(Intermediate),
-}
-
-#[subsystem_dispatch_gen(Event)]
-#[derive(Clone)]
-enum AllMessages {
-	/// Foo
-	Vvvvvv(Inner),
-
-    #[skip]
-    Uuuuu,
-}
-
-fn main() {
-    let _x = AllMessages::dispatch_iter(Event::Else);
-}
diff --git a/polkadot/node/subsystem/dispatch-gen/tests/ui/err-02-missing-from.stderr b/polkadot/node/subsystem/dispatch-gen/tests/ui/err-02-missing-from.stderr
deleted file mode 100644
index bf82201a7e40680ce45152c78096daed8bae31e4..0000000000000000000000000000000000000000
--- a/polkadot/node/subsystem/dispatch-gen/tests/ui/err-02-missing-from.stderr
+++ /dev/null
@@ -1,10 +0,0 @@
-error[E0308]: mismatched types
-  --> $DIR/err-02-missing-from.rs:29:1
-   |
-29 | #[subsystem_dispatch_gen(Event)]
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   | |
-   | expected enum `Inner`, found struct `Intermediate`
-   | help: try using a variant of the expected enum: `Inner::Bar(#[subsystem_dispatch_gen(Event)])`
-   |
-   = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info)
diff --git a/polkadot/node/subsystem/dispatch-gen/tests/ui/ok-01-with-intermediate.rs b/polkadot/node/subsystem/dispatch-gen/tests/ui/ok-01-with-intermediate.rs
deleted file mode 100644
index b160bf9ce1c14ab5899e840ab2aee18d380727b8..0000000000000000000000000000000000000000
--- a/polkadot/node/subsystem/dispatch-gen/tests/ui/ok-01-with-intermediate.rs
+++ /dev/null
@@ -1,48 +0,0 @@
-#![allow(dead_code)]
-
-use polkadot_procmacro_subsystem_dispatch_gen::subsystem_dispatch_gen;
-
-/// The event type in question.
-#[derive(Clone, Copy, Debug)]
-enum Event {
-	Smth,
-	Else,
-}
-
-impl Event {
-	fn focus(&self) -> std::result::Result<Intermediate, ()> {
-		Ok(Intermediate(self.clone()))
-	}
-}
-
-
-#[derive(Debug, Clone)]
-struct Intermediate(Event);
-
-
-/// This should have a `From<Event>` impl but does not.
-#[derive(Clone, Debug)]
-enum Inner {
-	Foo,
-	Bar(Intermediate),
-}
-
-impl From<Intermediate> for Inner {
-	fn from(src: Intermediate) -> Self {
-		Inner::Bar(src)
-	}
-}
-
-#[subsystem_dispatch_gen(Event)]
-#[derive(Clone)]
-enum AllMessages {
-	/// Foo
-	Vvvvvv(Inner),
-
-    #[skip]
-    Uuuuu,
-}
-
-fn main() {
-    let _x = AllMessages::dispatch_iter(Event::Else);
-}
diff --git a/polkadot/node/subsystem/src/lib.rs b/polkadot/node/subsystem/src/lib.rs
index f3abd0a51c7713ac10acd3f662360a03d645a3ad..98e76f9f949404f02f0cbaec6e62cbdbf13d27d1 100644
--- a/polkadot/node/subsystem/src/lib.rs
+++ b/polkadot/node/subsystem/src/lib.rs
@@ -14,235 +14,28 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-//! Subsystem trait definitions and message types.
+//! Subsystem umbrella crate.
 //!
-//! Node-side logic for Polkadot is mostly comprised of Subsystems, which are discrete components
-//! that communicate via message-passing. They are coordinated by an overseer, provided by a
-//! separate crate.
+//! Re-exports the node-side subsystem types and the generated overseer.
 
-#![warn(missing_docs)]
-
-use std::{pin::Pin, sync::Arc, fmt};
-
-use futures::prelude::*;
-use futures::channel::{mpsc, oneshot};
-use futures::future::BoxFuture;
-
-use polkadot_primitives::v1::{Hash, BlockNumber};
-use async_trait::async_trait;
-use smallvec::SmallVec;
-
-pub mod errors;
-pub mod messages;
+#![deny(missing_docs)]
+#![deny(unused_crate_dependencies)]
 
 pub use polkadot_node_jaeger as jaeger;
 pub use jaeger::*;
 
-use self::messages::AllMessages;
-
-/// How many slots are stack-reserved for active leaves updates
-///
-/// If there are fewer than this number of slots, then we've wasted some stack space.
-/// If there are greater than this number of slots, then we fall back to a heap vector.
-const ACTIVE_LEAVES_SMALLVEC_CAPACITY: usize = 8;
-
-
-/// The status of an activated leaf.
-#[derive(Debug, Clone)]
-pub enum LeafStatus {
-	/// A leaf is fresh when it's the first time the leaf has been encountered.
-	/// Most leaves should be fresh.
-	Fresh,
-	/// A leaf is stale when it's encountered for a subsequent time. This will happen
-	/// when the chain is reverted or the fork-choice rule abandons some chain.
-	Stale,
-}
+pub use polkadot_overseer::{OverseerSignal, ActiveLeavesUpdate, self as overseer};
 
-impl LeafStatus {
-	/// Returns a bool indicating fresh status.
-	pub fn is_fresh(&self) -> bool {
-		match *self {
-			LeafStatus::Fresh => true,
-			LeafStatus::Stale => false,
-		}
-	}
+pub use polkadot_node_subsystem_types::{
+	errors::{self, *},
+	ActivatedLeaf,
+	LeafStatus,
+};
 
-	/// Returns a bool indicating stale status.
-	pub fn is_stale(&self) -> bool {
-		match *self {
-			LeafStatus::Fresh => false,
-			LeafStatus::Stale => true,
-		}
-	}
-}
-
-/// Activated leaf.
-#[derive(Debug, Clone)]
-pub struct ActivatedLeaf {
-	/// The block hash.
-	pub hash: Hash,
-	/// The block number.
-	pub number: BlockNumber,
-	/// The status of the leaf.
-	pub status: LeafStatus,
-	/// An associated [`jaeger::Span`].
-	///
-	/// NOTE: Each span should only be kept active as long as the leaf is considered active and should be dropped
-	/// when the leaf is deactivated.
-	pub span: Arc<jaeger::Span>,
-}
-
-/// Changes in the set of active leaves: the parachain heads which we care to work on.
-///
-/// Note that the activated and deactivated fields indicate deltas, not complete sets.
-#[derive(Clone, Default)]
-pub struct ActiveLeavesUpdate {
-	/// New relay chain blocks of interest.
-	pub activated: SmallVec<[ActivatedLeaf; ACTIVE_LEAVES_SMALLVEC_CAPACITY]>,
-	/// Relay chain block hashes no longer of interest.
-	pub deactivated: SmallVec<[Hash; ACTIVE_LEAVES_SMALLVEC_CAPACITY]>,
-}
-
-impl ActiveLeavesUpdate {
-	/// Create a ActiveLeavesUpdate with a single activated hash
-	pub fn start_work(activated: ActivatedLeaf) -> Self {
-		Self { activated: [activated][..].into(), ..Default::default() }
-	}
-
-	/// Create a ActiveLeavesUpdate with a single deactivated hash
-	pub fn stop_work(hash: Hash) -> Self {
-		Self { deactivated: [hash][..].into(), ..Default::default() }
-	}
-
-	/// Is this update empty and doesn't contain any information?
-	pub fn is_empty(&self) -> bool {
-		self.activated.is_empty() && self.deactivated.is_empty()
-	}
-}
-
-impl PartialEq for ActiveLeavesUpdate {
-	/// Equality for `ActiveLeavesUpdate` doesnt imply bitwise equality.
-	///
-	/// Instead, it means equality when `activated` and `deactivated` are considered as sets.
-	fn eq(&self, other: &Self) -> bool {
-		self.activated.len() == other.activated.len() && self.deactivated.len() == other.deactivated.len()
-			&& self.activated.iter().all(|a| other.activated.iter().any(|o| a.hash == o.hash))
-			&& self.deactivated.iter().all(|a| other.deactivated.contains(a))
-	}
-}
-
-impl fmt::Debug for ActiveLeavesUpdate {
-	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-		struct Activated<'a>(&'a [ActivatedLeaf]);
-		impl fmt::Debug for Activated<'_> {
-			fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-				f.debug_list().entries(self.0.iter().map(|e| e.hash)).finish()
-			}
-		}
-
-		f.debug_struct("ActiveLeavesUpdate")
-			.field("activated", &Activated(&self.activated))
-			.field("deactivated", &self.deactivated)
-			.finish()
-	}
-}
-
-/// Signals sent by an overseer to a subsystem.
-#[derive(PartialEq, Clone, Debug)]
-pub enum OverseerSignal {
-	/// Subsystems should adjust their jobs to start and stop work on appropriate block hashes.
-	ActiveLeaves(ActiveLeavesUpdate),
-	/// `Subsystem` is informed of a finalized block by its block hash and number.
-	BlockFinalized(Hash, BlockNumber),
-	/// Conclude the work of the `Overseer` and all `Subsystem`s.
-	Conclude,
-}
-
-/// A message type that a subsystem receives from an overseer.
-/// It wraps signals from an overseer and messages that are circulating
-/// between subsystems.
-///
-/// It is generic over over the message type `M` that a particular `Subsystem` may use.
-#[derive(Debug)]
-pub enum FromOverseer<M> {
-	/// Signal from the `Overseer`.
-	Signal(OverseerSignal),
-
-	/// Some other `Subsystem`'s message.
-	Communication {
-		/// Contained message
-		msg: M,
-	},
-}
-
-impl<M> From<OverseerSignal> for FromOverseer<M> {
-	fn from(signal: OverseerSignal) -> Self {
-		FromOverseer::Signal(signal)
-	}
-}
-
-/// An error type that describes faults that may happen
-///
-/// These are:
-///   * Channels being closed
-///   * Subsystems dying when they are not expected to
-///   * Subsystems not dying when they are told to die
-///   * etc.
-#[derive(thiserror::Error, Debug)]
-#[allow(missing_docs)]
-pub enum SubsystemError {
-	#[error(transparent)]
-	NotifyCancellation(#[from] oneshot::Canceled),
-
-	#[error(transparent)]
-	QueueError(#[from] mpsc::SendError),
-
-	#[error("Failed to spawn a task: {0}")]
-	TaskSpawn(&'static str),
-
-	#[error(transparent)]
-	Infallible(#[from] std::convert::Infallible),
-
-	#[error(transparent)]
-	Prometheus(#[from] substrate_prometheus_endpoint::PrometheusError),
-
-	#[error(transparent)]
-	Jaeger(#[from] JaegerError),
-
-	#[error("Failed to {0}")]
-	Context(String),
-
-	#[error("Subsystem stalled: {0}")]
-	SubsystemStalled(&'static str),
-
-	/// Per origin (or subsystem) annotations to wrap an error.
-	#[error("Error originated in {origin}")]
-	FromOrigin {
-		/// An additional annotation tag for the origin of `source`.
-		origin: &'static str,
-		/// The wrapped error. Marked as source for tracking the error chain.
-		#[source] source: Box<dyn 'static + std::error::Error + Send + Sync>
-	},
-
-	#[error(transparent)]
-	Io(#[from] std::io::Error),
-}
-
-impl SubsystemError {
-	/// Adds a `str` as `origin` to the given error `err`.
-	pub fn with_origin<E: 'static + Send + Sync + std::error::Error>(origin: &'static str, err: E) -> Self {
-		Self::FromOrigin { origin, source: Box::new(err) }
-	}
-}
-
-/// An asynchronous subsystem task..
-///
-/// In essence it's just a newtype wrapping a `BoxFuture`.
-pub struct SpawnedSubsystem {
-	/// Name of the subsystem being spawned.
-	pub name: &'static str,
-	/// The task of the subsystem being spawned.
-	pub future: BoxFuture<'static, SubsystemResult<()>>,
+/// Re-export of all message types, including the `AllMessages` wrapper type.
+pub mod messages {
+	pub use super::overseer::AllMessages;
+	pub use polkadot_node_subsystem_types::messages::*;
 }
 
 /// A `Result` type that wraps [`SubsystemError`].
@@ -250,133 +43,48 @@ pub struct SpawnedSubsystem {
 /// [`SubsystemError`]: struct.SubsystemError.html
 pub type SubsystemResult<T> = Result<T, SubsystemError>;
 
-/// A sender used by subsystems to communicate with other subsystems.
-///
-/// Each clone of this type may add more capacity to the bounded buffer, so clones should
-/// be used sparingly.
-#[async_trait]
-pub trait SubsystemSender: Send + Clone + 'static {
-	/// Send a direct message to some other `Subsystem`, routed based on message type.
-	async fn send_message(&mut self, msg: AllMessages);
-
-	/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
-	async fn send_messages<T>(&mut self, msgs: T)
-		where T: IntoIterator<Item = AllMessages> + Send, T::IntoIter: Send;
-
-	/// Send a message onto the unbounded queue of some other `Subsystem`, routed based on message
-	/// type.
-	///
-	/// This function should be used only when there is some other bounding factor on the messages
-	/// sent with it. Otherwise, it risks a memory leak.
-	fn send_unbounded_message(&mut self, msg: AllMessages);
-}
-
-/// A context type that is given to the [`Subsystem`] upon spawning.
-/// It can be used by [`Subsystem`] to communicate with other [`Subsystem`]s
-/// or spawn jobs.
-///
-/// [`Overseer`]: struct.Overseer.html
-/// [`SubsystemJob`]: trait.SubsystemJob.html
-#[async_trait]
-pub trait SubsystemContext: Send + Sized + 'static {
-	/// The message type of this context. Subsystems launched with this context will expect
-	/// to receive messages of this type.
-	type Message: Send;
-
-	/// The message sender type of this context. Clones of the sender should be used sparingly.
-	type Sender: SubsystemSender;
-
-	/// Try to asynchronously receive a message.
-	///
-	/// This has to be used with caution, if you loop over this without
-	/// using `pending!()` macro you will end up with a busy loop!
-	async fn try_recv(&mut self) -> Result<Option<FromOverseer<Self::Message>>, ()>;
-
-	/// Receive a message.
-	async fn recv(&mut self) -> SubsystemResult<FromOverseer<Self::Message>>;
+// These aliases simplify usage without requiring large-scale modifications of
+// all subsystems at once.
 
-	/// Spawn a child task on the executor.
-	fn spawn(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>) -> SubsystemResult<()>;
 
-	/// Spawn a blocking child task on the executor's dedicated thread pool.
-	fn spawn_blocking(
-		&mut self,
-		name: &'static str,
-		s: Pin<Box<dyn Future<Output = ()> + Send>>,
-	) -> SubsystemResult<()>;
+/// Specialized wrapper for signals and messages a subsystem receives from the overseer.
+pub type FromOverseer<M> = polkadot_overseer::gen::FromOverseer<M, OverseerSignal>;
 
-	/// Get a mutable reference to the sender.
-	fn sender(&mut self) -> &mut Self::Sender;
+/// Specialized subsystem instance type for subsystems consuming a particular message type.
+pub type SubsystemInstance<Message> = polkadot_overseer::gen::SubsystemInstance<Message, OverseerSignal>;
 
-	/// Send a direct message to some other `Subsystem`, routed based on message type.
-	async fn send_message(&mut self, msg: AllMessages) {
-		self.sender().send_message(msg).await
-	}
+/// Sender trait for the `AllMessages` wrapper.
+pub trait SubsystemSender: polkadot_overseer::gen::SubsystemSender<messages::AllMessages> {
+}
 
-	/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
-	async fn send_messages<T>(&mut self, msgs: T)
-		where T: IntoIterator<Item = AllMessages> + Send, T::IntoIter: Send
-	{
-		self.sender().send_messages(msgs).await
-	}
+impl<T> SubsystemSender for T where T: polkadot_overseer::gen::SubsystemSender<messages::AllMessages> {
+}
 
+/// Spawned subsystem.
+pub type SpawnedSubsystem = polkadot_overseer::gen::SpawnedSubsystem<SubsystemError>;
 
-	/// Send a message onto the unbounded queue of some other `Subsystem`, routed based on message
-	/// type.
-	///
-	/// This function should be used only when there is some other bounding factor on the messages
-	/// sent with it. Otherwise, it risks a memory leak.
-	///
-	/// Generally, for this method to be used, these conditions should be met:
-	/// * There is a communication cycle between subsystems
-	/// * One of the parts of the cycle has a clear bound on the number of messages produced.
-	fn send_unbounded_message(&mut self, msg: AllMessages) {
-		self.sender().send_unbounded_message(msg)
-	}
-}
 
-/// A trait that describes the [`Subsystem`]s that can run on the [`Overseer`].
-///
-/// It is generic over the message type circulating in the system.
-/// The idea that we want some type contaning persistent state that
-/// can spawn actually running subsystems when asked to.
-///
-/// [`Overseer`]: struct.Overseer.html
-/// [`Subsystem`]: trait.Subsystem.html
-pub trait Subsystem<C: SubsystemContext> {
-	/// Start this `Subsystem` and return `SpawnedSubsystem`.
-	fn start(self, ctx: C) -> SpawnedSubsystem;
+/// Convenience specialization of `polkadot_overseer::gen::SubsystemContext` for node subsystems.
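+///
+/// A sketch of how a subsystem implementation can bound its context on this
+/// specialized trait (the message type and loop body below are placeholders):
+///
+/// ```ignore
+/// async fn run<Context>(mut ctx: Context) -> SubsystemResult<()>
+/// where
+/// 	Context: SubsystemContext<Message = CandidateBackingMessage>,
+/// {
+/// 	loop {
+/// 		match ctx.recv().await? {
+/// 			FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
+/// 			FromOverseer::Signal(_) => {}
+/// 			FromOverseer::Communication { msg } => { /* handle `msg` */ }
+/// 		}
+/// 	}
+/// }
+/// ```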
+pub trait SubsystemContext: polkadot_overseer::gen::SubsystemContext<
+	Signal=OverseerSignal,
+	AllMessages=messages::AllMessages,
+	Error=SubsystemError,
+>
+{
+	/// The message type the subsystem consumes.
+	type Message: std::fmt::Debug + Send + 'static;
+	/// Sender type to communicate with other subsystems.
+	type Sender: SubsystemSender + Send + Clone + 'static;
 }
 
-/// A dummy subsystem that implements [`Subsystem`] for all
-/// types of messages. Used for tests or as a placeholder.
-pub struct DummySubsystem;
-
-impl<C: SubsystemContext> Subsystem<C> for DummySubsystem
+impl<T> SubsystemContext for T
 where
-	C::Message: std::fmt::Debug
+	T: polkadot_overseer::gen::SubsystemContext<
+		Signal=OverseerSignal,
+		AllMessages=messages::AllMessages,
+		Error=SubsystemError,
+	>,
 {
-	fn start(self, mut ctx: C) -> SpawnedSubsystem {
-		let future = Box::pin(async move {
-			loop {
-				match ctx.recv().await {
-					Err(_) => return Ok(()),
-					Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return Ok(()),
-					Ok(overseer_msg) => {
-						tracing::debug!(
-							target: "dummy-subsystem",
-							"Discarding a message sent from overseer {:?}",
-							overseer_msg
-						);
-						continue;
-					}
-				}
-			}
-		});
-
-		SpawnedSubsystem {
-			name: "dummy-subsystem",
-			future,
-		}
-	}
+	type Message = <Self as polkadot_overseer::gen::SubsystemContext>::Message;
+	type Sender = <Self as polkadot_overseer::gen::SubsystemContext>::Sender;
 }
diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs
index 121b7306623bd431db4e50bc61b747bfc88bba7c..0c5b2e3738eb215e8534294bb5d109353031733f 100644
--- a/polkadot/node/test/service/src/lib.rs
+++ b/polkadot/node/test/service/src/lib.rs
@@ -22,7 +22,7 @@ pub mod chain_spec;
 
 pub use chain_spec::*;
 use futures::future::Future;
-use polkadot_overseer::OverseerHandler;
+use polkadot_overseer::Handle;
 use polkadot_primitives::v1::{
 	Id as ParaId, HeadData, ValidationCode, Balance, CollatorPair,
 };
@@ -290,7 +290,7 @@ pub struct PolkadotTestNode {
 	/// Client's instance.
 	pub client: Arc<Client>,
 	/// The overseer handler.
-	pub overseer_handler: OverseerHandler,
+	pub overseer_handler: Handle,
 	/// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot node" to other nodes.
 	pub addr: MultiaddrWithPeerId,
 	/// RPCHandlers to make RPC queries.