diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock
index 7b789052c03fa94eeef85cf93fd1b11f398af943..602d122c05dabd172ca8706e83a95517412a2677 100644
--- a/polkadot/Cargo.lock
+++ b/polkadot/Cargo.lock
@@ -5816,7 +5816,6 @@ dependencies = [
  "polkadot-node-subsystem-util",
  "polkadot-primitives",
  "rand 0.8.4",
- "sc-keystore",
  "sc-network",
  "smallvec 1.6.1",
  "sp-application-crypto",
@@ -5944,6 +5943,39 @@ dependencies = [
  "sp-std",
 ]
 
+[[package]]
+name = "polkadot-dispute-distribution"
+version = "0.1.0"
+dependencies = [
+ "assert_matches",
+ "async-trait",
+ "futures 0.3.15",
+ "futures-timer 3.0.2",
+ "lazy_static",
+ "lru",
+ "maplit",
+ "parity-scale-codec",
+ "polkadot-erasure-coding",
+ "polkadot-node-core-runtime-api",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-test-helpers",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "rand 0.8.4",
+ "sc-keystore",
+ "sc-network",
+ "smallvec 1.6.1",
+ "sp-application-crypto",
+ "sp-core",
+ "sp-keyring",
+ "sp-keystore",
+ "sp-tracing",
+ "thiserror",
+ "tracing",
+]
+
 [[package]]
 name = "polkadot-erasure-coding"
 version = "0.9.8"
@@ -5970,7 +6002,6 @@ dependencies = [
  "polkadot-primitives",
  "rand 0.8.4",
  "rand_chacha 0.3.1",
- "sc-keystore",
  "sp-application-crypto",
  "sp-consensus-babe",
  "sp-core",
@@ -5995,7 +6026,6 @@ dependencies = [
  "polkadot-node-subsystem-util",
  "polkadot-overseer",
  "polkadot-primitives",
- "sc-authority-discovery",
  "sc-network",
  "sp-consensus",
  "sp-core",
@@ -6350,11 +6380,13 @@ dependencies = [
 name = "polkadot-node-network-protocol"
 version = "0.1.0"
 dependencies = [
+ "async-trait",
  "futures 0.3.15",
  "parity-scale-codec",
  "polkadot-node-jaeger",
  "polkadot-node-primitives",
  "polkadot-primitives",
+ "sc-authority-discovery",
  "sc-network",
  "strum",
  "thiserror",
@@ -6379,6 +6411,7 @@ dependencies = [
  "sp-maybe-compressed-blob",
  "sp-runtime",
  "thiserror",
+ "tracing",
  "zstd",
 ]
 
@@ -6407,9 +6440,13 @@ dependencies = [
  "polkadot-overseer",
  "polkadot-primitives",
  "polkadot-statement-table",
+ "sc-keystore",
  "sc-network",
  "smallvec 1.6.1",
+ "sp-application-crypto",
  "sp-core",
+ "sp-keyring",
+ "sp-keystore",
  "tracing",
 ]
 
@@ -6828,6 +6865,7 @@ dependencies = [
  "polkadot-availability-recovery",
  "polkadot-client",
  "polkadot-collator-protocol",
+ "polkadot-dispute-distribution",
  "polkadot-gossip-support",
  "polkadot-network-bridge",
  "polkadot-node-collation-generation",
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index f03e91485d2f5d2bc57d8796e70437cc25301df7..dc98195a19a51b02ef8cb101ac5d391fd68c00cc 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -64,6 +64,7 @@ members = [
 	"node/network/availability-recovery",
 	"node/network/collator-protocol",
 	"node/network/gossip-support",
+	"node/network/dispute-distribution",
 	"node/overseer",
 	"node/overseer/overseer-gen",
 	"node/overseer/overseer-gen/proc-macro",
diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs
index 0bd94103ca8bc9662ba1b7e4562d93b801be0498..c7038e42461100c6d98a644d0c0d2d89026b579c 100644
--- a/polkadot/node/core/dispute-coordinator/src/lib.rs
+++ b/polkadot/node/core/dispute-coordinator/src/lib.rs
@@ -28,22 +28,21 @@
 use std::collections::HashSet;
 use std::sync::Arc;
 
-use polkadot_node_primitives::{CandidateVotes, SignedDisputeStatement};
+use polkadot_node_primitives::{CandidateVotes, DISPUTE_WINDOW, DisputeMessage, SignedDisputeStatement, DisputeMessageCheckError};
 use polkadot_node_subsystem::{
-	overseer,
-	messages::{
-		DisputeCoordinatorMessage, ChainApiMessage, DisputeParticipationMessage,
-	},
-	SubsystemContext, FromOverseer, OverseerSignal, SpawnedSubsystem,
-	SubsystemError,
+	overseer, SubsystemContext, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
 	errors::{ChainApiError, RuntimeApiError},
+	messages::{
+		ChainApiMessage, DisputeCoordinatorMessage, DisputeDistributionMessage,
+		DisputeParticipationMessage, ImportStatementsResult
+	}
 };
 use polkadot_node_subsystem_util::rolling_session_window::{
 	RollingSessionWindow, SessionWindowUpdate,
 };
 use polkadot_primitives::v1::{
-	SessionIndex, CandidateHash, Hash, CandidateReceipt, DisputeStatement, ValidatorIndex,
-	ValidatorSignature, BlockNumber, ValidatorPair,
+	BlockNumber, CandidateHash, CandidateReceipt, DisputeStatement, Hash,
+	SessionIndex, SessionInfo, ValidatorIndex, ValidatorPair, ValidatorSignature
 };
 
 use futures::prelude::*;
@@ -61,10 +60,6 @@ mod tests;
 
 const LOG_TARGET: &str = "parachain::dispute-coordinator";
 
-// It would be nice to draw this from the chain state, but we have no tools for it right now.
-// On Polkadot this is 1 day, and on Kusama it's 6 hours.
-const DISPUTE_WINDOW: SessionIndex = 6;
-
 struct State {
 	keystore: Arc<LocalKeystore>,
 	highest_session: Option<SessionIndex>,
@@ -134,6 +129,9 @@ pub enum Error {
 	#[error(transparent)]
 	Oneshot(#[from] oneshot::Canceled),
 
+	#[error("Oneshot send failed")]
+	OneshotSend,
+
 	#[error(transparent)]
 	Subsystem(#[from] SubsystemError),
 
@@ -308,6 +306,7 @@ async fn handle_incoming(
 			candidate_receipt,
 			session,
 			statements,
+			pending_confirmation,
 		} => {
 			handle_import_statements(
 				ctx,
@@ -318,6 +317,7 @@ async fn handle_incoming(
 				candidate_receipt,
 				session,
 				statements,
+				pending_confirmation,
 			).await?;
 		}
 		DisputeCoordinatorMessage::ActiveDisputes(rx) => {
@@ -400,8 +400,13 @@ async fn handle_import_statements(
 	candidate_receipt: CandidateReceipt,
 	session: SessionIndex,
 	statements: Vec<(SignedDisputeStatement, ValidatorIndex)>,
+	pending_confirmation: oneshot::Sender<ImportStatementsResult>,
 ) -> Result<(), Error> {
 	if state.highest_session.map_or(true, |h| session + DISPUTE_WINDOW < h) {
+
+		// It is not valid to participate in an ancient dispute (spam?).
+		pending_confirmation.send(ImportStatementsResult::InvalidImport).map_err(|_| Error::OneshotSend)?;
+
 		return Ok(());
 	}
 
@@ -479,37 +484,54 @@ async fn handle_import_statements(
 	let already_disputed = is_disputed && !was_undisputed;
 	let concluded_valid = votes.valid.len() >= supermajority_threshold;
 
-	let mut tx = db::v1::Transaction::default();
+	{ // Scope: only confirm a valid import once it has actually been persisted.
+		let mut tx = db::v1::Transaction::default();
 
-	if freshly_disputed && !concluded_valid {
-		// add to active disputes and begin local participation.
-		update_active_disputes(
-			store,
-			config,
-			&mut tx,
-			|active| active.insert(session, candidate_hash),
-		)?;
+		if freshly_disputed && !concluded_valid {
 
-		ctx.send_message(DisputeParticipationMessage::Participate {
-			candidate_hash,
-			candidate_receipt,
-			session,
-			n_validators: n_validators as u32,
-		}).await;
-	}
+			let (report_availability, receive_availability) = oneshot::channel();
+			ctx.send_message(DisputeParticipationMessage::Participate {
+				candidate_hash,
+				candidate_receipt,
+				session,
+				n_validators: n_validators as u32,
+				report_availability,
+			}).await;
 
-	if concluded_valid && already_disputed {
-		// remove from active disputes.
-		update_active_disputes(
-			store,
-			config,
-			&mut tx,
-			|active| active.delete(session, candidate_hash),
-		)?;
+			if !receive_availability.await.map_err(Error::Oneshot)? {
+				pending_confirmation.send(ImportStatementsResult::InvalidImport).map_err(|_| Error::OneshotSend)?;
+				tracing::debug!(
+					target: LOG_TARGET,
+					"Recovering availability failed - invalid import."
+				);
+				return Ok(())
+			}
+
+			// add to active disputes and begin local participation.
+			update_active_disputes(
+				store,
+				config,
+				&mut tx,
+				|active| active.insert(session, candidate_hash),
+			)?;
+
+		}
+
+		if concluded_valid && already_disputed {
+			// remove from active disputes.
+			update_active_disputes(
+				store,
+				config,
+				&mut tx,
+				|active| active.delete(session, candidate_hash),
+			)?;
+		}
+
+		tx.put_candidate_votes(session, candidate_hash, votes.into());
+		tx.write(store, &config.column_config())?;
 	}
 
-	tx.put_candidate_votes(session, candidate_hash, votes.into());
-	tx.write(store, &config.column_config())?;
+	pending_confirmation.send(ImportStatementsResult::ValidImport).map_err(|_| Error::OneshotSend)?;
 
 	Ok(())
 }
@@ -541,7 +563,7 @@ async fn issue_local_statement(
 	valid: bool,
 ) -> Result<(), Error> {
 	// Load session info.
-	let validators = match state.rolling_session_window.session_info(session) {
+	let info = match state.rolling_session_window.session_info(session) {
 		None => {
 			tracing::warn!(
 				target: LOG_TARGET,
@@ -551,9 +573,11 @@ async fn issue_local_statement(
 
 			return Ok(())
 		}
-		Some(info) => info.validators.clone(),
+		Some(info) => info,
 	};
 
+	let validators = info.validators.clone();
+
 	let votes = db::v1::load_candidate_votes(
 		store,
 		&config.column_config(),
@@ -604,8 +628,27 @@ async fn issue_local_statement(
 		}
 	}
 
+	// Get our message out:
+	for (statement, index) in &statements {
+		let dispute_message = match make_dispute_message(info, &votes, statement.clone(), *index) {
+			Err(err) => {
+				tracing::debug!(
+					target: LOG_TARGET,
+					?err,
+					"Creating dispute message failed."
+				);
+				continue
+			}
+			Ok(dispute_message) => dispute_message,
+		};
+
+		ctx.send_message(DisputeDistributionMessage::SendDispute(dispute_message)).await;
+	}
+
+
 	// Do import
 	if !statements.is_empty() {
+		let (pending_confirmation, _rx) = oneshot::channel();
 		handle_import_statements(
 			ctx,
 			store,
@@ -615,12 +658,67 @@ async fn issue_local_statement(
 			candidate_receipt,
 			session,
 			statements,
+			pending_confirmation,
 		).await?;
 	}
 
 	Ok(())
 }
 
+#[derive(Debug, thiserror::Error)]
+enum MakeDisputeMessageError {
+	#[error("There was no opposite vote available")]
+	NoOppositeVote,
+	#[error("Found vote had an invalid validator index that could not be found")]
+	InvalidValidatorIndex,
+	#[error("Statement found in votes had invalid signature")]
+	InvalidStoredStatement,
+	#[error(transparent)]
+	InvalidStatementCombination(DisputeMessageCheckError),
+}
+
+fn make_dispute_message(
+	info: &SessionInfo,
+	votes: &CandidateVotes,
+	our_vote: SignedDisputeStatement,
+	our_index: ValidatorIndex
+) -> Result<DisputeMessage, MakeDisputeMessageError> {
+
+	let validators = &info.validators;
+
+	let (valid_statement, valid_index, invalid_statement, invalid_index) =
+		if let DisputeStatement::Valid(_) = our_vote.statement() {
+			let (statement_kind, validator_index, validator_signature)
+				= votes.invalid.get(0).ok_or(MakeDisputeMessageError::NoOppositeVote)?.clone();
+			let other_vote = SignedDisputeStatement::new_checked(
+				DisputeStatement::Invalid(statement_kind),
+				our_vote.candidate_hash().clone(),
+				our_vote.session_index(),
+				validators.get(validator_index.0 as usize).ok_or(MakeDisputeMessageError::InvalidValidatorIndex)?.clone(),
+				validator_signature,
+			).map_err(|()| MakeDisputeMessageError::InvalidStoredStatement)?;
+			(our_vote, our_index, other_vote, validator_index)
+	} else {
+		let (statement_kind, validator_index, validator_signature)
+			= votes.valid.get(0).ok_or(MakeDisputeMessageError::NoOppositeVote)?.clone();
+		let other_vote = SignedDisputeStatement::new_checked(
+			DisputeStatement::Valid(statement_kind),
+			our_vote.candidate_hash().clone(),
+			our_vote.session_index(),
+			validators.get(validator_index.0 as usize).ok_or(MakeDisputeMessageError::InvalidValidatorIndex)?.clone(),
+			validator_signature,
+		).map_err(|()| MakeDisputeMessageError::InvalidStoredStatement)?;
+		(other_vote, validator_index, our_vote, our_index)
+	};
+
+	DisputeMessage::from_signed_statements(
+		valid_statement, valid_index,
+		invalid_statement, invalid_index,
+		votes.candidate_receipt.clone(),
+		info,
+	).map_err(MakeDisputeMessageError::InvalidStatementCombination)
+}
+
 fn determine_undisputed_chain(
 	store: &dyn KeyValueDB,
 	config: &Config,
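
A note on the new `pending_confirmation` channel above: the coordinator now answers only after the votes have actually been written to the database, which is what lets a requester (e.g. the dispute-distribution receiver) safely acknowledge a network request. A minimal, self-contained sketch of that handshake; the toy `ImportStatementsResult` mirrors the enum from this diff, everything else is illustrative:

```rust
use futures::channel::oneshot;
use futures::executor::block_on;

// Toy stand-in for the `ImportStatementsResult` introduced in this change.
#[derive(Debug, PartialEq)]
enum ImportStatementsResult {
    InvalidImport,
    ValidImport,
}

fn main() {
    let (pending_confirmation, confirmation_rx) = oneshot::channel();

    // Coordinator side: confirm only after the DB transaction was written.
    pending_confirmation
        .send(ImportStatementsResult::ValidImport)
        .expect("receiver should still be alive");

    // Requester side: a received confirmation guarantees the votes were
    // persisted, so the network request can be acked.
    let result = block_on(confirmation_rx).expect("sender should answer");
    assert_eq!(result, ImportStatementsResult::ValidImport);
}
```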
diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs
index dc8c93de968da49d00eacf46e49e72456857a33d..cfa84e3818c12d07b59502d547680ae5f9942275 100644
--- a/polkadot/node/core/dispute-coordinator/src/tests.rs
+++ b/polkadot/node/core/dispute-coordinator/src/tests.rs
@@ -25,7 +25,10 @@ use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystem
 use sp_core::testing::TaskExecutor;
 use sp_keyring::Sr25519Keyring;
 use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
-use futures::future::{self, BoxFuture};
+use futures::{
+	channel::oneshot,
+	future::{self, BoxFuture},
+};
 use parity_scale_codec::Encode;
 use assert_matches::assert_matches;
 
@@ -261,6 +264,7 @@ fn conflicting_votes_lead_to_dispute_participation() {
 			false,
 		).await;
 
+		let (pending_confirmation, _confirmation_rx) = oneshot::channel();
 		virtual_overseer.send(FromOverseer::Communication {
 			msg: DisputeCoordinatorMessage::ImportStatements {
 				candidate_hash,
@@ -270,9 +274,9 @@ fn conflicting_votes_lead_to_dispute_participation() {
 					(valid_vote, ValidatorIndex(0)),
 					(invalid_vote, ValidatorIndex(1)),
 				],
+				pending_confirmation,
 			},
 		}).await;
-
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::DisputeParticipation(DisputeParticipationMessage::Participate {
@@ -280,11 +284,13 @@ fn conflicting_votes_lead_to_dispute_participation() {
 				candidate_receipt: c_receipt,
 				session: s,
 				n_validators,
+				report_availability,
 			}) => {
 				assert_eq!(c_hash, candidate_hash);
 				assert_eq!(c_receipt, candidate_receipt);
 				assert_eq!(s, session);
 				assert_eq!(n_validators, test_state.validators.len() as u32);
+				report_availability.send(true).unwrap();
 			}
 		);
 
@@ -310,6 +316,7 @@ fn conflicting_votes_lead_to_dispute_participation() {
 			assert_eq!(votes.invalid.len(), 1);
 		}
 
+		let (pending_confirmation, _confirmation_rx) = oneshot::channel();
 		virtual_overseer.send(FromOverseer::Communication {
 			msg: DisputeCoordinatorMessage::ImportStatements {
 				candidate_hash,
@@ -318,6 +325,7 @@ fn conflicting_votes_lead_to_dispute_participation() {
 				statements: vec![
 					(invalid_vote_2, ValidatorIndex(2)),
 				],
+				pending_confirmation,
 			},
 		}).await;
 
@@ -371,6 +379,7 @@ fn positive_votes_dont_trigger_participation() {
 			true,
 		).await;
 
+		let (pending_confirmation, _confirmation_rx) = oneshot::channel();
 		virtual_overseer.send(FromOverseer::Communication {
 			msg: DisputeCoordinatorMessage::ImportStatements {
 				candidate_hash,
@@ -379,6 +388,7 @@ fn positive_votes_dont_trigger_participation() {
 				statements: vec![
 					(valid_vote, ValidatorIndex(0)),
 				],
+				pending_confirmation,
 			},
 		}).await;
 
@@ -404,6 +414,7 @@ fn positive_votes_dont_trigger_participation() {
 			assert!(votes.invalid.is_empty());
 		}
 
+		let (pending_confirmation, _confirmation_rx) = oneshot::channel();
 		virtual_overseer.send(FromOverseer::Communication {
 			msg: DisputeCoordinatorMessage::ImportStatements {
 				candidate_hash,
@@ -412,6 +423,7 @@ fn positive_votes_dont_trigger_participation() {
 				statements: vec![
 					(valid_vote_2, ValidatorIndex(1)),
 				],
+				pending_confirmation,
 			},
 		}).await;
 
@@ -472,6 +484,7 @@ fn wrong_validator_index_is_ignored() {
 			false,
 		).await;
 
+		let (pending_confirmation, _confirmation_rx) = oneshot::channel();
 		virtual_overseer.send(FromOverseer::Communication {
 			msg: DisputeCoordinatorMessage::ImportStatements {
 				candidate_hash,
@@ -481,6 +494,7 @@ fn wrong_validator_index_is_ignored() {
 					(valid_vote, ValidatorIndex(1)),
 					(invalid_vote, ValidatorIndex(0)),
 				],
+				pending_confirmation,
 			},
 		}).await;
 
@@ -541,6 +555,7 @@ fn finality_votes_ignore_disputed_candidates() {
 			false,
 		).await;
 
+		let (pending_confirmation, _confirmation_rx) = oneshot::channel();
 		virtual_overseer.send(FromOverseer::Communication {
 			msg: DisputeCoordinatorMessage::ImportStatements {
 				candidate_hash,
@@ -550,9 +565,21 @@ fn finality_votes_ignore_disputed_candidates() {
 					(valid_vote, ValidatorIndex(0)),
 					(invalid_vote, ValidatorIndex(1)),
 				],
+				pending_confirmation,
 			},
 		}).await;
-		let _ = virtual_overseer.recv().await;
+
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::DisputeParticipation(
+				DisputeParticipationMessage::Participate {
+					report_availability,
+					..
+				}
+			) => {
+				report_availability.send(true).unwrap();
+			}
+		);
 
 		{
 			let (tx, rx) = oneshot::channel();
@@ -624,6 +651,7 @@ fn supermajority_valid_dispute_may_be_finalized() {
 			false,
 		).await;
 
+		let (pending_confirmation, _confirmation_rx) = oneshot::channel();
 		virtual_overseer.send(FromOverseer::Communication {
 			msg: DisputeCoordinatorMessage::ImportStatements {
 				candidate_hash,
@@ -633,6 +661,7 @@ fn supermajority_valid_dispute_may_be_finalized() {
 					(valid_vote, ValidatorIndex(0)),
 					(invalid_vote, ValidatorIndex(1)),
 				],
+				pending_confirmation,
 			},
 		}).await;
 
@@ -650,12 +679,14 @@ fn supermajority_valid_dispute_may_be_finalized() {
 			statements.push((vote, ValidatorIndex(i as _)));
 		};
 
+		let (pending_confirmation, _confirmation_rx) = oneshot::channel();
 		virtual_overseer.send(FromOverseer::Communication {
 			msg: DisputeCoordinatorMessage::ImportStatements {
 				candidate_hash,
 				candidate_receipt: candidate_receipt.clone(),
 				session,
 				statements,
+				pending_confirmation,
 			},
 		}).await;
 
diff --git a/polkadot/node/core/dispute-participation/src/lib.rs b/polkadot/node/core/dispute-participation/src/lib.rs
index 19827cab41204f412892bd59222b4163de4b192f..619da697857528ef99763be479be95b1c52ce649 100644
--- a/polkadot/node/core/dispute-participation/src/lib.rs
+++ b/polkadot/node/core/dispute-participation/src/lib.rs
@@ -83,6 +83,9 @@ pub enum Error {
 	#[error(transparent)]
 	Oneshot(#[from] oneshot::Canceled),
 
+	#[error("Oneshot receiver died")]
+	OneshotSendFailed,
+
 	#[error(transparent)]
 	Participation(#[from] ParticipationError),
 }
@@ -159,6 +162,7 @@ async fn handle_incoming(
 			candidate_receipt,
 			session,
 			n_validators,
+			report_availability,
 		} => {
 			if let Some((_, block_hash)) = state.recent_block {
 				participate(
@@ -168,6 +172,7 @@ async fn handle_incoming(
 					candidate_receipt,
 					session,
 					n_validators,
+					report_availability,
 				)
 				.await
 			} else {
@@ -184,6 +189,7 @@ async fn participate(
 	candidate_receipt: CandidateReceipt,
 	session: SessionIndex,
 	n_validators: u32,
+	report_availability: oneshot::Sender<bool>,
 ) -> Result<(), Error> {
 	let (recover_available_data_tx, recover_available_data_rx) = oneshot::channel();
 	let (code_tx, code_rx) = oneshot::channel();
@@ -203,14 +209,21 @@ async fn participate(
 	.await;
 
 	let available_data = match recover_available_data_rx.await? {
-		Ok(data) => data,
+		Ok(data) => {
+			report_availability.send(true).map_err(|_| Error::OneshotSendFailed)?;
+			data
+		}
 		Err(RecoveryError::Invalid) => {
+			report_availability.send(true).map_err(|_| Error::OneshotSendFailed)?;
+
 			// the available data was recovered but it is invalid, therefore we'll
 			// vote negatively for the candidate dispute
 			cast_invalid_vote(ctx, candidate_hash, candidate_receipt, session).await;
 			return Ok(());
 		}
 		Err(RecoveryError::Unavailable) => {
+			report_availability.send(false).map_err(|_| Error::OneshotSendFailed)?;
+
 			return Err(ParticipationError::MissingAvailableData(candidate_hash).into());
 		}
 	};
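
The `report_availability: oneshot::Sender<bool>` threaded through `participate` reports two outcomes across its three call sites: `true` when data was recovered (even if it turned out invalid, since participation can still proceed with a negative vote), and `false` when the data is unavailable, in which case the coordinator must not treat the import as valid. A self-contained sketch of that reporting logic, assuming only the `futures` crate (`report` is an illustrative helper, not part of the subsystem):

```rust
use futures::channel::oneshot;
use futures::executor::block_on;

// Toy stand-in for the `RecoveryError` matched on above.
enum RecoveryError {
    Invalid,
    Unavailable,
}

fn report(recovery: Result<(), RecoveryError>, tx: oneshot::Sender<bool>) {
    match recovery {
        // Data recovered (even if invalid): the dispute can proceed.
        Ok(()) | Err(RecoveryError::Invalid) => {
            let _ = tx.send(true);
        }
        // Nothing to recover: the coordinator must not record the import.
        Err(RecoveryError::Unavailable) => {
            let _ = tx.send(false);
        }
    }
}

fn main() {
    let (tx, rx) = oneshot::channel();
    report(Err(RecoveryError::Unavailable), tx);
    assert_eq!(block_on(rx).unwrap(), false);
}
```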
diff --git a/polkadot/node/core/dispute-participation/src/tests.rs b/polkadot/node/core/dispute-participation/src/tests.rs
index a56e204b0551807e984f37f5304acaaee34431c0..734f997338e8758a7b0bdd220d55c400c91df8be 100644
--- a/polkadot/node/core/dispute-participation/src/tests.rs
+++ b/polkadot/node/core/dispute-participation/src/tests.rs
@@ -80,7 +80,7 @@ async fn activate_leaf(virtual_overseer: &mut VirtualOverseer, block_number: Blo
 		.await;
 }
 
-async fn participate(virtual_overseer: &mut VirtualOverseer) {
+async fn participate(virtual_overseer: &mut VirtualOverseer) -> oneshot::Receiver<bool> {
 	let commitments = CandidateCommitments::default();
 	let candidate_receipt = {
 		let mut receipt = CandidateReceipt::default();
@@ -91,6 +91,8 @@ async fn participate(virtual_overseer: &mut VirtualOverseer) {
 	let session = 1;
 	let n_validators = 10;
 
+	let (report_availability, receive_availability) = oneshot::channel();
+
 	virtual_overseer
 		.send(FromOverseer::Communication {
 			msg: DisputeParticipationMessage::Participate {
@@ -98,12 +100,14 @@ async fn participate(virtual_overseer: &mut VirtualOverseer) {
 				candidate_receipt: candidate_receipt.clone(),
 				session,
 				n_validators,
+				report_availability,
 			},
-		})
-		.await;
+	})
+	.await;
+	receive_availability
 }
 
-async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) {
+async fn recover_available_data(virtual_overseer: &mut VirtualOverseer, receive_availability: oneshot::Receiver<bool>) {
 	let pov_block = PoV {
 		block_data: BlockData(Vec::new()),
 	};
@@ -122,6 +126,8 @@ async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) {
 		},
 		"overseer did not receive recover available data message",
 	);
+
+	assert_eq!(receive_availability.await.expect("Availability should get reported"), true);
 }
 
 async fn fetch_validation_code(virtual_overseer: &mut VirtualOverseer) {
@@ -166,7 +172,7 @@ async fn store_available_data(virtual_overseer: &mut VirtualOverseer, success: b
 fn cannot_participate_when_recent_block_state_is_missing() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
-			participate(&mut virtual_overseer).await;
+			let _ = participate(&mut virtual_overseer).await;
 
 			virtual_overseer
 		})
@@ -175,7 +181,7 @@ fn cannot_participate_when_recent_block_state_is_missing() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
 			activate_leaf(&mut virtual_overseer, 10).await;
-			participate(&mut virtual_overseer).await;
+			let _ = participate(&mut virtual_overseer).await;
 
 			// after activating at least one leaf the recent block
 			// state should be available which should lead to trying
@@ -199,7 +205,7 @@ fn cannot_participate_if_cannot_recover_available_data() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
 			activate_leaf(&mut virtual_overseer, 10).await;
-			participate(&mut virtual_overseer).await;
+			let receive_availability = participate(&mut virtual_overseer).await;
 
 			assert_matches!(
 				virtual_overseer.recv().await,
@@ -211,6 +217,8 @@ fn cannot_participate_if_cannot_recover_available_data() {
 				"overseer did not receive recover available data message",
 			);
 
+			assert_eq!(receive_availability.await.expect("Availability should get reported"), false);
+
 			virtual_overseer
 		})
 	});
@@ -221,8 +229,8 @@ fn cannot_participate_if_cannot_recover_validation_code() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
 			activate_leaf(&mut virtual_overseer, 10).await;
-			participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer).await;
+			let receive_availability = participate(&mut virtual_overseer).await;
+			recover_available_data(&mut virtual_overseer, receive_availability).await;
 
 			assert_matches!(
 				virtual_overseer.recv().await,
@@ -248,7 +256,7 @@ fn cast_invalid_vote_if_available_data_is_invalid() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
 			activate_leaf(&mut virtual_overseer, 10).await;
-			participate(&mut virtual_overseer).await;
+			let receive_availability = participate(&mut virtual_overseer).await;
 
 			assert_matches!(
 				virtual_overseer.recv().await,
@@ -260,6 +268,8 @@ fn cast_invalid_vote_if_available_data_is_invalid() {
 				"overseer did not receive recover available data message",
 			);
 
+			assert_eq!(receive_availability.await.expect("Availability should get reported"), true);
+
 			assert_matches!(
 				virtual_overseer.recv().await,
 				AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::IssueLocalStatement(
@@ -281,8 +291,8 @@ fn cast_invalid_vote_if_validation_fails_or_is_invalid() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
 			activate_leaf(&mut virtual_overseer, 10).await;
-			participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer).await;
+			let receive_availability = participate(&mut virtual_overseer).await;
+			recover_available_data(&mut virtual_overseer, receive_availability).await;
 			fetch_validation_code(&mut virtual_overseer).await;
 			store_available_data(&mut virtual_overseer, true).await;
 
@@ -317,8 +327,8 @@ fn cast_invalid_vote_if_validation_passes_but_commitments_dont_match() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
 			activate_leaf(&mut virtual_overseer, 10).await;
-			participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer).await;
+			let receive_availability = participate(&mut virtual_overseer).await;
+			recover_available_data(&mut virtual_overseer, receive_availability).await;
 			fetch_validation_code(&mut virtual_overseer).await;
 			store_available_data(&mut virtual_overseer, true).await;
 
@@ -357,8 +367,8 @@ fn cast_valid_vote_if_validation_passes() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
 			activate_leaf(&mut virtual_overseer, 10).await;
-			participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer).await;
+			let receive_availability = participate(&mut virtual_overseer).await;
+			recover_available_data(&mut virtual_overseer, receive_availability).await;
 			fetch_validation_code(&mut virtual_overseer).await;
 			store_available_data(&mut virtual_overseer, true).await;
 
@@ -393,8 +403,8 @@ fn failure_to_store_available_data_does_not_preclude_participation() {
 	test_harness(|mut virtual_overseer| {
 		Box::pin(async move {
 			activate_leaf(&mut virtual_overseer, 10).await;
-			participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer).await;
+			let receive_availability = participate(&mut virtual_overseer).await;
+			recover_available_data(&mut virtual_overseer, receive_availability).await;
 			fetch_validation_code(&mut virtual_overseer).await;
 			// the store available data request should fail
 			store_available_data(&mut virtual_overseer, false).await;
diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml
index ea6accbeb8cfdeda5b71020012ca0e72effcbfb7..e39817aedef2c9e9a7444a0765a27c296e1adcd0 100644
--- a/polkadot/node/network/availability-distribution/Cargo.toml
+++ b/polkadot/node/network/availability-distribution/Cargo.toml
@@ -27,7 +27,6 @@ polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpe
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
 sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
 futures-timer = "3.0.2"
 assert_matches = "1.4.0"
diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
index 650072f0ea7e3b99ed627776292dd9ab783ba463..b091db144dfd275d0c92a11a743de6ec47e27c4b 100644
--- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
@@ -19,10 +19,8 @@
 use futures::{FutureExt, channel::oneshot, future::BoxFuture};
 
 use polkadot_subsystem::jaeger;
-use polkadot_node_network_protocol::{
-	request_response::{OutgoingRequest, Recipient, request::{RequestError, Requests},
-	v1::{PoVFetchingRequest, PoVFetchingResponse}}
-};
+use polkadot_node_network_protocol::request_response::{OutgoingRequest, Recipient, request::{RequestError, Requests},
+	v1::{PoVFetchingRequest, PoVFetchingResponse}};
 use polkadot_primitives::v1::{
 	CandidateHash, Hash, ValidatorIndex,
 };
@@ -49,7 +47,7 @@ pub async fn fetch_pov<Context>(
 where
 	Context: SubsystemContext,
 {
-	let info = &runtime.get_session_info(ctx, parent).await?.session_info;
+	let info = &runtime.get_session_info(ctx.sender(), parent).await?.session_info;
 	let authority_id = info.discovery_keys.get(from_validator.0 as usize)
 		.ok_or(NonFatal::InvalidValidatorIndex)?
 		.clone();
@@ -129,11 +127,12 @@ mod tests {
 	use polkadot_primitives::v1::{CandidateHash, Hash, ValidatorIndex};
 	use polkadot_node_primitives::BlockData;
 	use polkadot_subsystem_testhelpers as test_helpers;
-	use polkadot_subsystem::messages::{AllMessages, AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest};
+	use polkadot_subsystem::messages::{AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, AllMessages};
+	use test_helpers::mock::make_ferdie_keystore;
 
 	use super::*;
 	use crate::LOG_TARGET;
-	use crate::tests::mock::{make_session_info, make_ferdie_keystore};
+	use crate::tests::mock::make_session_info;
 
 	#[test]
 	fn rejects_invalid_pov() {
diff --git a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
index a7e1d69d78fdb5c19e51920dbf4b3ed67181b791..60503cec85319c580d4fef36e7fdb7a590810418 100644
--- a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
@@ -106,7 +106,7 @@ impl SessionCache {
 		Context: SubsystemContext,
 		F: FnOnce(&SessionInfo) -> R,
 	{
-		let session_index = runtime.get_session_index(ctx, parent).await?;
+		let session_index = runtime.get_session_index(ctx.sender(), parent).await?;
 
 		if let Some(o_info) = self.session_info_cache.get(&session_index) {
 			tracing::trace!(target: LOG_TARGET, session_index, "Got session from lru");
@@ -183,7 +183,7 @@ impl SessionCache {
 	where
 		Context: SubsystemContext,
 	{
-		let info = runtime.get_session_info_by_index(ctx, parent, session_index).await?;
+		let info = runtime.get_session_info_by_index(ctx.sender(), parent, session_index).await?;
 
 		let discovery_keys = info.session_info.discovery_keys.clone();
 		let mut validator_groups = info.session_info.validator_groups.clone();
diff --git a/polkadot/node/network/availability-distribution/src/tests/mock.rs b/polkadot/node/network/availability-distribution/src/tests/mock.rs
index 6ba0973400db022c19db6b4ce1b5378e3c5802da..ae17e2be1ffa5da122a32c70117d8189f11b796f 100644
--- a/polkadot/node/network/availability-distribution/src/tests/mock.rs
+++ b/polkadot/node/network/availability-distribution/src/tests/mock.rs
@@ -19,30 +19,16 @@
 
 use std::sync::Arc;
 
-use sc_keystore::LocalKeystore;
 use sp_keyring::Sr25519Keyring;
-use sp_application_crypto::AppKey;
 
 use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
 use polkadot_primitives::v1::{
 	CandidateCommitments, CandidateDescriptor, CandidateHash,
 	CommittedCandidateReceipt, GroupIndex, Hash, HeadData, Id as ParaId,
-	OccupiedCore, PersistedValidationData, SessionInfo, ValidatorId, ValidatorIndex
+	OccupiedCore, PersistedValidationData, SessionInfo, ValidatorIndex
 };
 use polkadot_node_primitives::{PoV, ErasureChunk, AvailableData, BlockData};
-use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
-
-/// Get mock keystore with `Ferdie` key.
-pub fn make_ferdie_keystore() -> SyncCryptoStorePtr {
-	let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
-	SyncCryptoStore::sr25519_generate_new(
-		&*keystore,
-		ValidatorId::ID,
-		Some(&Sr25519Keyring::Ferdie.to_seed()),
-	)
-	.expect("Insert key into keystore");
-	keystore
-}
+
 
 /// Create dummy session info with two validator groups.
 pub fn make_session_info() -> SessionInfo {
diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs
index 11680c38b91ed82ab59afe84271913e4813c2322..3d8ea8f40a23fd7f5a5e377ed5778eee4ecddb87 100644
--- a/polkadot/node/network/availability-distribution/src/tests/state.rs
+++ b/polkadot/node/network/availability-distribution/src/tests/state.rs
@@ -46,9 +46,9 @@ use polkadot_node_network_protocol::{
 	request_response::{IncomingRequest, OutgoingRequest, Requests, v1}
 };
 use polkadot_subsystem_testhelpers as test_helpers;
-use test_helpers::SingleItemSink;
+use test_helpers::{SingleItemSink, mock::make_ferdie_keystore};
 
-use super::mock::{make_session_info, OccupiedCoreBuilder, make_ferdie_keystore};
+use super::mock::{make_session_info, OccupiedCoreBuilder};
 use crate::LOG_TARGET;
 
 type VirtualOverseer = test_helpers::TestSubsystemContextHandle<AvailabilityDistributionMessage>;
diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml
index 84d8aaba0e1a13ad2213d5e88d29dd45802f07a0..48f91a06a135b66acd6c653b336fc7d19d894b10 100644
--- a/polkadot/node/network/bridge/Cargo.toml
+++ b/polkadot/node/network/bridge/Cargo.toml
@@ -10,7 +10,6 @@ futures = "0.3.15"
 tracing = "0.1.26"
 polkadot-primitives = { path = "../../../primitives" }
 parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
-sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
 polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs
index 101ec01466a53cd7461cad95f7ba11ee0633eab8..6f963e97c2aaa7647b7058ccf3dc4b92142fea10 100644
--- a/polkadot/node/network/bridge/src/lib.rs
+++ b/polkadot/node/network/bridge/src/lib.rs
@@ -24,6 +24,7 @@ use parity_scale_codec::{Encode, Decode};
 use parking_lot::Mutex;
 use futures::prelude::*;
 use futures::stream::BoxStream;
+use polkadot_subsystem::messages::DisputeDistributionMessage;
 use sc_network::Event as NetworkEvent;
 use sp_consensus::SyncOracle;
 
@@ -57,7 +58,8 @@ use polkadot_node_subsystem_util::metrics::{self, prometheus};
 /// To be added to [`NetworkConfiguration::extra_sets`].
 pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority};
 
-use std::collections::{HashMap, hash_map, HashSet};
+use std::collections::HashSet;
+use std::collections::{HashMap, hash_map};
 use std::sync::Arc;
 
 mod validator_discovery;
@@ -66,12 +68,14 @@ mod validator_discovery;
 ///
 /// Defines the `Network` trait with an implementation for an `Arc<NetworkService>`.
 mod network;
-use network::{Network, send_message, get_peer_id_by_authority_id};
+use network::{Network, send_message};
 
 /// Request multiplexer for combining the multiple request sources into a single `Stream` of `AllMessages`.
 mod multiplexer;
 pub use multiplexer::RequestMultiplexer;
 
+use crate::network::get_peer_id_by_authority_id;
+
 #[cfg(test)]
 mod tests;
 
@@ -304,7 +308,7 @@ impl<N, AD> NetworkBridge<N, AD> {
 impl<Net, AD, Context> Subsystem<Context, SubsystemError> for NetworkBridge<Net, AD>
 	where
 		Net: Network + Sync,
-		AD: validator_discovery::AuthorityDiscovery,
+		AD: validator_discovery::AuthorityDiscovery + Clone,
 		Context: SubsystemContext<Message = NetworkBridgeMessage> + overseer::SubsystemContext<Message = NetworkBridgeMessage>,
 {
 	fn start(mut self, ctx: Context) -> SpawnedSubsystem {
@@ -380,7 +384,7 @@ where
 	Context: SubsystemContext<Message = NetworkBridgeMessage>,
 	Context: overseer::SubsystemContext<Message = NetworkBridgeMessage>,
 	N: Network,
-	AD: validator_discovery::AuthorityDiscovery,
+	AD: validator_discovery::AuthorityDiscovery + Clone,
 {
 	// This is kept sorted, descending, by block number.
 	let mut live_heads: Vec<ActivatedLeaf> = Vec::with_capacity(MAX_VIEW_HEADS);
@@ -877,7 +881,7 @@ async fn run_network<N, AD, Context>(
 ) -> SubsystemResult<()>
 where
 	N: Network,
-	AD: validator_discovery::AuthorityDiscovery,
+	AD: validator_discovery::AuthorityDiscovery + Clone,
 	Context: SubsystemContext<Message=NetworkBridgeMessage> + overseer::SubsystemContext<Message=NetworkBridgeMessage>,
 {
 	let shared = Shared::default();
@@ -894,6 +898,10 @@ where
 		.get_statement_fetching()
 		.expect("Gets initialized, must be `Some` on startup. qed.");
 
+	let dispute_receiver = request_multiplexer
+		.get_dispute_sending()
+		.expect("Gets initialized, must be `Some` on startup. qed.");
+
 	let (remote, network_event_handler) = handle_network_messages::<>(
 		ctx.sender().clone(),
 		network_service.clone(),
@@ -906,6 +914,9 @@ where
 
 	ctx.spawn("network-bridge-network-worker", Box::pin(remote))?;
 
+	ctx.send_message(
+		DisputeDistributionMessage::DisputeSendingReceiver(dispute_receiver)
+	).await;
 	ctx.send_message(
 		StatementDistributionMessage::StatementFetchingReceiver(statement_receiver)
 	).await;
diff --git a/polkadot/node/network/bridge/src/multiplexer.rs b/polkadot/node/network/bridge/src/multiplexer.rs
index b88cc414695932410375b8e7a9ba6ef163a80353..0c750bd048b7aea77dfccabf5806c1118d7120ea 100644
--- a/polkadot/node/network/bridge/src/multiplexer.rs
+++ b/polkadot/node/network/bridge/src/multiplexer.rs
@@ -15,6 +15,7 @@
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
 use std::pin::Pin;
+use std::unreachable;
 
 use futures::channel::mpsc;
 use futures::stream::{FusedStream, Stream};
@@ -42,6 +43,7 @@ use polkadot_overseer::AllMessages;
 pub struct RequestMultiplexer {
 	receivers: Vec<(Protocol, mpsc::Receiver<network::IncomingRequest>)>,
 	statement_fetching: Option<mpsc::Receiver<network::IncomingRequest>>,
+	dispute_sending: Option<mpsc::Receiver<network::IncomingRequest>>,
 	next_poll: usize,
 }
 
@@ -68,6 +70,8 @@ impl RequestMultiplexer {
 			})
 			.unzip();
 
+		// Ok, this code is ugly as hell and also a hack, see https://github.com/paritytech/polkadot/issues/2842.
+		// But it works and is executed on startup, so if anything is wrong here it will be noticed immediately.
 		let index = receivers.iter().enumerate().find_map(|(i, (p, _))|
 			if let Protocol::StatementFetching = p {
 				Some(i)
@@ -77,10 +81,20 @@ impl RequestMultiplexer {
 		).expect("Statement fetching must be registered. qed.");
 		let statement_fetching = Some(receivers.remove(index).1);
 
+		let index = receivers.iter().enumerate().find_map(|(i, (p, _))|
+			if let Protocol::DisputeSending = p {
+				Some(i)
+			} else {
+				None
+			}
+		).expect("Dispute sending must be registered. qed.");
+		let dispute_sending = Some(receivers.remove(index).1);
+
 		(
 			Self {
 				receivers,
 				statement_fetching,
+				dispute_sending,
 				next_poll: 0,
 			},
 			cfgs,
@@ -93,6 +107,13 @@ impl RequestMultiplexer {
 	pub fn get_statement_fetching(&mut self) -> Option<mpsc::Receiver<network::IncomingRequest>> {
 		std::mem::take(&mut self.statement_fetching)
 	}
+
+	/// Get the receiver for handling dispute sending requests.
+	///
+	/// This function will only return `Some` once.
+	pub fn get_dispute_sending(&mut self) -> Option<mpsc::Receiver<network::IncomingRequest>> {
+		std::mem::take(&mut self.dispute_sending)
+	}
 }
 
 impl Stream for RequestMultiplexer {
@@ -174,6 +195,9 @@ fn multiplex_single(
 		Protocol::StatementFetching => {
 			unreachable!("Statement fetching requests are handled directly. qed.");
 		}
+		Protocol::DisputeSending => {
+			unreachable!("Dispute sending requests are handled directly. qed.");
+		}
 	};
 	Ok(r)
 }
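
`get_dispute_sending` mirrors `get_statement_fetching`: `std::mem::take` swaps the stored `Option` for `None`, so the receiver can be handed out exactly once. A minimal sketch of that take-once pattern in isolation (toy `Multiplexer`, not the real one):

```rust
use futures::channel::mpsc;

struct Multiplexer {
    dispute_sending: Option<mpsc::Receiver<u8>>,
}

impl Multiplexer {
    /// Hands out the receiver exactly once; later calls return `None`.
    fn get_dispute_sending(&mut self) -> Option<mpsc::Receiver<u8>> {
        std::mem::take(&mut self.dispute_sending)
    }
}

fn main() {
    let (_tx, rx) = mpsc::channel(8);
    let mut multiplexer = Multiplexer { dispute_sending: Some(rx) };
    assert!(multiplexer.get_dispute_sending().is_some()); // first call yields it
    assert!(multiplexer.get_dispute_sending().is_none()); // later calls get `None`
}
```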
diff --git a/polkadot/node/network/bridge/src/tests.rs b/polkadot/node/network/bridge/src/tests.rs
index 4fcb57d7554ee3c2e94944c0c3eaf6612c59dc36..7a90e299aa642a637c0e2fa9c2529d80543427f6 100644
--- a/polkadot/node/network/bridge/src/tests.rs
+++ b/polkadot/node/network/bridge/src/tests.rs
@@ -67,7 +67,7 @@ struct TestNetwork {
 	_req_configs: Vec<RequestResponseConfig>,
 }
 
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 struct TestAuthorityDiscovery;
 
 // The test's view of the network. This receives updates from the subsystem in the form
@@ -688,6 +688,12 @@ fn peer_view_updates_sent_via_overseer() {
 
 		let view = view![Hash::repeat_byte(1)];
 
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::DisputeDistribution(
+				DisputeDistributionMessage::DisputeSendingReceiver(_)
+			)
+		);
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -740,6 +746,12 @@ fn peer_messages_sent_via_overseer() {
 			ObservedRole::Full,
 		).await;
 
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::DisputeDistribution(
+				DisputeDistributionMessage::DisputeSendingReceiver(_)
+			)
+		);
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -812,6 +824,12 @@ fn peer_disconnect_from_just_one_peerset() {
 		network_handle.connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full).await;
 		network_handle.connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full).await;
 
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::DisputeDistribution(
+				DisputeDistributionMessage::DisputeSendingReceiver(_)
+			)
+		);
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -894,6 +912,12 @@ fn relays_collation_protocol_messages() {
 		let peer_a = PeerId::random();
 		let peer_b = PeerId::random();
 
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::DisputeDistribution(
+				DisputeDistributionMessage::DisputeSendingReceiver(_)
+			)
+		);
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -992,6 +1016,12 @@ fn different_views_on_different_peer_sets() {
 		network_handle.connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full).await;
 		network_handle.connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full).await;
 
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::DisputeDistribution(
+				DisputeDistributionMessage::DisputeSendingReceiver(_)
+			)
+		);
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -1153,6 +1183,12 @@ fn send_messages_to_peers() {
 		network_handle.connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full).await;
 		network_handle.connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full).await;
 
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::DisputeDistribution(
+				DisputeDistributionMessage::DisputeSendingReceiver(_)
+			)
+		);
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
@@ -1279,7 +1315,8 @@ fn spread_event_to_subsystems_is_up_to_date() {
 			AllMessages::ApprovalDistribution(_) => { cnt += 1; }
 			AllMessages::GossipSupport(_) => unreachable!("Not interested in network events"),
 			AllMessages::DisputeCoordinator(_) => unreachable!("Not interested in network events"),
-			AllMessages::DisputeParticipation(_) => unreachable!("Not interetsed in network events"),
+			AllMessages::DisputeParticipation(_) => unreachable!("Not interested in network events"),
+			AllMessages::DisputeDistribution(_) => unreachable!("Not interested in network events"),
 			AllMessages::ChainSelection(_) => unreachable!("Not interested in network events"),
 			// Add variants here as needed, `{ cnt += 1; }` for those that need to be
 			// notified, `unreachable!()` for those that should not.
@@ -1325,6 +1362,12 @@ fn our_view_updates_decreasing_order_and_limited_to_max() {
 			0,
 		);
 
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::DisputeDistribution(
+				DisputeDistributionMessage::DisputeSendingReceiver(_)
+			)
+		);
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::StatementDistribution(
diff --git a/polkadot/node/network/bridge/src/validator_discovery.rs b/polkadot/node/network/bridge/src/validator_discovery.rs
index 2debf12d8c0ef06b4cb4760b8c12f3454eff5dc7..c17fe42c0f30c57e680faaa04ca130c5bce3023c 100644
--- a/polkadot/node/network/bridge/src/validator_discovery.rs
+++ b/polkadot/node/network/bridge/src/validator_discovery.rs
@@ -21,37 +21,16 @@ use crate::Network;
 use core::marker::PhantomData;
 use std::collections::HashSet;
 
-use async_trait::async_trait;
 use futures::channel::oneshot;
 
 use sc_network::multiaddr::Multiaddr;
-use sc_authority_discovery::Service as AuthorityDiscoveryService;
-use polkadot_node_network_protocol::PeerId;
+
 use polkadot_primitives::v1::AuthorityDiscoveryId;
 use polkadot_node_network_protocol::peer_set::{PeerSet, PerPeerSet};
+pub use polkadot_node_network_protocol::authority_discovery::AuthorityDiscovery;
 
 const LOG_TARGET: &str = "parachain::validator-discovery";
 
-/// An abstraction over the authority discovery service.
-#[async_trait]
-pub trait AuthorityDiscovery: Send + Clone + 'static {
-	/// Get the addresses for the given [`AuthorityId`] from the local address cache.
-	async fn get_addresses_by_authority_id(&mut self, authority: AuthorityDiscoveryId) -> Option<Vec<Multiaddr>>;
-	/// Get the [`AuthorityId`] for the given [`PeerId`] from the local address cache.
-	async fn get_authority_id_by_peer_id(&mut self, peer_id: PeerId) -> Option<AuthorityDiscoveryId>;
-}
-
-#[async_trait]
-impl AuthorityDiscovery for AuthorityDiscoveryService {
-	async fn get_addresses_by_authority_id(&mut self, authority: AuthorityDiscoveryId) -> Option<Vec<Multiaddr>> {
-		AuthorityDiscoveryService::get_addresses_by_authority_id(self, authority).await
-	}
-
-	async fn get_authority_id_by_peer_id(&mut self, peer_id: PeerId) -> Option<AuthorityDiscoveryId> {
-		AuthorityDiscoveryService::get_authority_id_by_peer_id(self, peer_id).await
-	}
-}
-
 pub(super) struct Service<N, AD> {
 	state: PerPeerSet<StatePerPeerSet>,
 	// PhantomData used to make the struct generic instead of having generic methods
@@ -147,9 +126,10 @@ mod tests {
 
 	use std::{borrow::Cow, collections::HashMap};
 	use futures::stream::BoxStream;
+	use async_trait::async_trait;
 	use sc_network::{Event as NetworkEvent, IfDisconnected};
 	use sp_keyring::Sr25519Keyring;
-	use polkadot_node_network_protocol::request_response::request::Requests;
+	use polkadot_node_network_protocol::{PeerId, request_response::request::Requests};
 
 	fn new_service() -> Service<TestNetwork, TestAuthorityDiscovery> {
 		Service::new()
@@ -164,7 +144,7 @@ mod tests {
 		peers_set: HashSet<Multiaddr>,
 	}
 
-	#[derive(Default, Clone)]
+	#[derive(Default, Clone, Debug)]
 	struct TestAuthorityDiscovery {
 		by_authority_id: HashMap<AuthorityDiscoveryId, Multiaddr>,
 		by_peer_id: HashMap<PeerId, AuthorityDiscoveryId>,
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index 643429b9c060f0aaae699070a34f32c75bdabd31..6baa865232e344427080b64e705928344742d630 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -450,8 +450,8 @@ where
 	Context: SubsystemContext<Message = CollatorProtocolMessage>,
 	Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
 {
-	let session_index = runtime.get_session_index(ctx, relay_parent).await?;
-	let info = &runtime.get_session_info_by_index(ctx, relay_parent, session_index)
+	let session_index = runtime.get_session_index(ctx.sender(), relay_parent).await?;
+	let info = &runtime.get_session_info_by_index(ctx.sender(), relay_parent, session_index)
 		.await?
 		.session_info;
 	tracing::debug!(target: LOG_TARGET, ?session_index, "Received session info");
@@ -798,7 +798,7 @@ where
 					"Collation seconded message received with none-seconded statement.",
 				);
 			} else {
-				let statement = runtime.check_signature(ctx, relay_parent, statement)
+				let statement = runtime.check_signature(ctx.sender(), relay_parent, statement)
 					.await?
 					.map_err(NonFatal::InvalidStatementSignature)?;
 
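The recurring mechanical change in this file (and in availability-distribution above) is that runtime helpers such as `get_session_index` and `check_signature` now take `ctx.sender()` rather than the whole context. A toy sketch of the design choice, with stand-in types: borrowing only the sender half leaves the rest of the context usable while a request is prepared.

```rust
struct Sender;
struct Context {
    sender: Sender,
    // ... receiver half, spawner, etc. in the real context
}

impl Context {
    fn sender(&mut self) -> &mut Sender {
        &mut self.sender
    }
}

// Helpers only need the sender; callers keep full use of `ctx` otherwise.
fn get_session_index(_sender: &mut Sender) -> u32 {
    42 // placeholder value for the sketch
}

fn main() {
    let mut ctx = Context { sender: Sender };
    let session_index = get_session_index(ctx.sender());
    assert_eq!(session_index, 42);
}
```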
diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..46332b89c9419d2036115f9a894d1f77b857fbab
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/Cargo.toml
@@ -0,0 +1,38 @@
+[package]
+name = "polkadot-dispute-distribution"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+
+[dependencies]
+futures = "0.3.15"
+tracing = "0.1.26"
+parity-scale-codec = { version = "2.0.0", features = ["std"] }
+polkadot-primitives = { path = "../../../primitives" }
+polkadot-erasure-coding = { path = "../../../erasure-coding" }
+polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+polkadot-node-network-protocol = { path = "../../network/protocol" }
+polkadot-node-subsystem-util = { path = "../../subsystem-util" }
+polkadot-node-primitives = { path = "../../primitives" }
+polkadot-node-core-runtime-api = { path = "../../core/runtime-api" }
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
+sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+thiserror = "1.0.23"
+rand = "0.8.3"
+lru = "0.6.5"
+
+[dev-dependencies]
+async-trait = "0.1.42"
+polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpers", path = "../../subsystem-test-helpers" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
+sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+futures-timer = "3.0.2"
+assert_matches = "1.4.0"
+maplit = "1.0"
+smallvec = "1.6.1"
+lazy_static = "1.4.0"
diff --git a/polkadot/node/network/dispute-distribution/src/error.rs b/polkadot/node/network/dispute-distribution/src/error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e7112b6942cf037de597f9c067362013fe8b20b7
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/error.rs
@@ -0,0 +1,101 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+//
+
+//! Error handling related code and Error/Result definitions.
+
+use thiserror::Error;
+
+use polkadot_node_subsystem_util::{Fault, runtime, unwrap_non_fatal};
+use polkadot_subsystem::SubsystemError;
+
+use crate::LOG_TARGET;
+use crate::sender;
+
+#[derive(Debug, Error)]
+#[error(transparent)]
+pub struct Error(pub Fault<NonFatal, Fatal>);
+
+impl From<NonFatal> for Error {
+	fn from(e: NonFatal) -> Self {
+		Self(Fault::from_non_fatal(e))
+	}
+}
+
+impl From<Fatal> for Error {
+	fn from(f: Fatal) -> Self {
+		Self(Fault::from_fatal(f))
+	}
+}
+
+impl From<sender::Error> for Error {
+	fn from(e: sender::Error) -> Self {
+		match e.0 {
+			Fault::Fatal(f) => Self(Fault::Fatal(Fatal::Sender(f))),
+			Fault::Err(nf) => Self(Fault::Err(NonFatal::Sender(nf))),
+		}
+	}
+}
+
+/// Fatal errors of this subsystem.
+#[derive(Debug, Error)]
+pub enum Fatal {
+
+	/// Receiving subsystem message from overseer failed.
+	#[error("Receiving message from overseer failed")]
+	SubsystemReceive(#[source] SubsystemError),
+
+	/// Spawning a running task failed.
+	#[error("Spawning subsystem task failed")]
+	SpawnTask(#[source] SubsystemError),
+
+	/// DisputeSender mpsc receiver exhausted.
+	#[error("DisputeSender mpsc receiver stream exhausted")]
+	SenderExhausted,
+
+	/// Errors coming from runtime::Runtime.
+	#[error("Error while accessing runtime information")]
+	Runtime(#[from] #[source] runtime::Fatal),
+
+	/// Errors coming from DisputeSender
+	#[error("Error coming from the dispute sender")]
+	Sender(#[from] #[source] sender::Fatal),
+}
+
+/// Non-fatal errors of this subsystem.
+#[derive(Debug, Error)]
+pub enum NonFatal {
+	/// Errors coming from DisputeSender
+	#[error("Error coming from the dispute sender")]
+	Sender(#[from] #[source] sender::NonFatal),
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+pub type FatalResult<T> = std::result::Result<T, Fatal>;
+
+/// Utility for eating top level errors and log them.
+///
+/// We basically always want to try and continue on error. This utility function is meant to
+/// consume top-level errors by simply logging them.
+pub fn log_error(result: Result<()>, ctx: &'static str)
+	-> std::result::Result<(), Fatal>
+{
+	if let Some(error) = unwrap_non_fatal(result.map_err(|e| e.0))? {
+		tracing::warn!(target: LOG_TARGET, error = ?error, ctx);
+	}
+	Ok(())
+}
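
The `Fault`-based split above lets a subsystem's main loop swallow `NonFatal` errors and terminate only on `Fatal` ones; `log_error` is where that policy is applied. A self-contained toy version of what `log_error` and `unwrap_non_fatal` accomplish, using plain enums in place of the real types:

```rust
#[derive(Debug)]
enum Error {
    Fatal(String),
    NonFatal(String),
}

/// Log non-fatal errors and keep going; propagate fatal ones to the caller.
fn log_error(result: Result<(), Error>, ctx: &'static str) -> Result<(), String> {
    match result {
        Ok(()) => Ok(()),
        Err(Error::NonFatal(e)) => {
            eprintln!("{}: non-fatal error: {}", ctx, e);
            Ok(())
        }
        Err(Error::Fatal(f)) => Err(f),
    }
}

fn main() {
    // A misbehaving peer is logged, but the subsystem keeps running.
    assert!(log_error(Err(Error::NonFatal("bad signature".into())), "handle_request").is_ok());
    // Losing the overseer connection terminates the subsystem.
    assert!(log_error(Err(Error::Fatal("overseer gone".into())), "run_loop").is_err());
}
```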
diff --git a/polkadot/node/network/dispute-distribution/src/lib.rs b/polkadot/node/network/dispute-distribution/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c9e53ad4489a0760ea2120f5197b8bc0a9130e3c
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/lib.rs
@@ -0,0 +1,271 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+
+//! # Sending and receiving of `DisputeRequest`s.
+//!
+//! This subsystem essentially consists of two parts:
+//!
+//! - a sender
+//! - and a receiver
+//!
+//! The sender is responsible for getting our vote out, see [`sender`]. The receiver handles
+//! incoming [`DisputeRequest`]s and offers spam protection, see [`receiver`].
+
+use futures::channel::mpsc;
+use futures::{FutureExt, StreamExt, TryFutureExt};
+
+use polkadot_node_network_protocol::authority_discovery::AuthorityDiscovery;
+use sp_keystore::SyncCryptoStorePtr;
+
+use polkadot_node_primitives::DISPUTE_WINDOW;
+use polkadot_subsystem::{
+	overseer, messages::DisputeDistributionMessage, FromOverseer, OverseerSignal, SpawnedSubsystem,
+	SubsystemContext, SubsystemError,
+};
+use polkadot_node_subsystem_util::{
+	runtime,
+	runtime::RuntimeInfo,
+};
+
+/// ## The sender [`DisputeSender`]
+///
+/// The sender (`DisputeSender`) keeps track of live disputes and makes sure our vote gets out for
+/// each one of those. The sender is responsible for sending our vote to each validator
+/// participating in the dispute and to each authority currently authoring blocks. The sending can
+/// be initiated by sending a `DisputeDistributionMessage::SendDispute` message to this subsystem.
+///
+/// In addition the `DisputeSender` will query the coordinator for active disputes on each
+/// [`DisputeSender::update_leaves`] call and will initiate sending (start a `SendTask`) for every,
+/// to this subsystem, unknown dispute. This is to make sure, we get our vote out, even on
+/// restarts.
+///
+///	The actual work of sending and keeping track of transmission attempts to each validator for a
+///	particular dispute are done by [`SendTask`].  The purpose of the `DisputeSender` is to keep
+///	track of all ongoing disputes and start and clean up `SendTask`s accordingly.
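+///
+/// Sending is triggered from outside via the subsystem's message type; an illustrative sketch
+/// (the `overseer_handle` name and the exact handle API are assumed here):
+///
+/// ```ignore
+/// overseer_handle
+/// 	.send_msg(DisputeDistributionMessage::SendDispute(dispute_message))
+/// 	.await;
+/// ```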
+mod sender;
+use self::sender::{DisputeSender, TaskFinish};
+
+/// ## The receiver [`DisputesReceiver`]
+///
+/// The receiving side is implemented as `DisputesReceiver` and is run as a separate, long-running
+/// task within this subsystem ([`DisputesReceiver::run`]).
+///
+/// Conceptually, all the receiver has to do is wait for incoming requests, which are passed in
+/// via a dedicated channel, and forward them to the dispute coordinator via
+/// `DisputeCoordinatorMessage::ImportStatements`. Being the interface to the network and to
+/// untrusted nodes, the reality is of course not that simple. Before importing statements the
+/// receiver will filter out malicious/unwanted/spammy requests as best it can. For this it does
+/// the following:
+///
+/// - Drop all messages from non-validator nodes; for this it requires the [`AuthorityDiscovery`]
+/// service.
+/// - Drop messages from a node if we are already importing a message from that node (flood
+/// protection).
+/// - Drop messages from nodes that previously sent us messages whose statement import failed.
+/// - Drop any obviously invalid votes (invalid signatures for example).
+/// - Ban peers whose votes were deemed invalid.
+///
+/// For successfully imported votes, we confirm receipt of the message back to the sender.
+/// This way a received confirmation guarantees that the vote has been stored to disk by the
+/// receiver.
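+///
+/// A rough sketch of the resulting pipeline (illustrative pseudocode, not the actual
+/// implementation):
+///
+/// ```ignore
+/// loop {
+/// 	let request = incoming.next().await;     // dedicated request channel
+/// 	ensure_is_validator(&request)?;          // via `AuthorityDiscovery`
+/// 	ensure_not_pending_or_banned(&request)?; // flood protection
+/// 	let votes = check_signatures(request)?;  // drop invalid votes
+/// 	import_into_coordinator(votes).await;    // `ImportStatements`
+/// }
+/// ```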
+mod receiver;
+use self::receiver::DisputesReceiver;
+
+/// Error and [`Result`] type for this subsystem.
+mod error;
+use error::{Fatal, FatalResult};
+use error::{Result, log_error};
+
+#[cfg(test)]
+mod tests;
+
+mod metrics;
+/// Prometheus `Metrics` for dispute distribution.
+pub use metrics::Metrics;
+
+const LOG_TARGET: &'static str = "parachain::dispute-distribution";
+
+/// The dispute distribution subsystem.
+pub struct DisputeDistributionSubsystem<AD> {
+	/// Easy and efficient runtime access for this subsystem.
+	runtime: RuntimeInfo,
+
+	/// Sender for our dispute requests.
+	disputes_sender: DisputeSender,
+
+	/// Receive messages from `SendTask`.
+	sender_rx: mpsc::Receiver<TaskFinish>,
+
+	/// Authority discovery service.
+	authority_discovery: AD,
+
+	/// Metrics for this subsystem.
+	metrics: Metrics,
+}
+
+impl<Context, AD> overseer::Subsystem<Context, SubsystemError> for DisputeDistributionSubsystem<AD>
+where
+	Context: SubsystemContext<Message = DisputeDistributionMessage>
+		+ overseer::SubsystemContext<Message = DisputeDistributionMessage>
+		+ Sync + Send,
+	AD: AuthorityDiscovery + Clone,
+{
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
+		let future = self
+			.run(ctx)
+			.map_err(|e| SubsystemError::with_origin("dispute-distribution", e))
+			.boxed();
+
+		SpawnedSubsystem {
+			name: "dispute-distribution-subsystem",
+			future,
+		}
+	}
+}
+
+impl<AD> DisputeDistributionSubsystem<AD>
+where
+	AD: AuthorityDiscovery + Clone,
+{
+	/// Create a new instance of the dispute distribution subsystem.
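+	///
+	/// Illustrative construction (the `keystore`, `authority_discovery` and `metrics` values
+	/// would come from node setup; the names are assumed here):
+	///
+	/// ```ignore
+	/// let subsystem = DisputeDistributionSubsystem::new(keystore, authority_discovery, metrics);
+	/// ```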
+	pub fn new(keystore: SyncCryptoStorePtr, authority_discovery: AD, metrics: Metrics) -> Self {
+		let runtime = RuntimeInfo::new_with_config(runtime::Config {
+			keystore: Some(keystore),
+			session_cache_lru_size: DISPUTE_WINDOW as usize,
+		});
+		let (tx, sender_rx) = mpsc::channel(1);
+		let disputes_sender = DisputeSender::new(tx, metrics.clone());
+		Self { runtime, disputes_sender, sender_rx, authority_discovery, metrics }
+	}
+
+	/// Start processing work as passed on from the Overseer.
+	async fn run<Context>(mut self, mut ctx: Context) -> std::result::Result<(), Fatal>
+	where
+		Context: SubsystemContext<Message = DisputeDistributionMessage>
+			+ overseer::SubsystemContext<Message = DisputeDistributionMessage>
+			+ Sync + Send,
+	{
+		loop {
+			let message = MuxedMessage::receive(&mut ctx, &mut self.sender_rx).await;
+			match message {
+				MuxedMessage::Subsystem(result) => {
+					let result = match result? {
+						FromOverseer::Signal(signal) => {
+							match self.handle_signals(&mut ctx, signal).await {
+								Ok(SignalResult::Conclude) => return Ok(()),
+								Ok(SignalResult::Continue) => Ok(()),
+								Err(f) => Err(f),
+							}
+						}
+						FromOverseer::Communication { msg } =>
+							self.handle_subsystem_message(&mut ctx, msg).await,
+					};
+					log_error(result, "on FromOverseer")?;
+				}
+				MuxedMessage::Sender(result) => {
+					self.disputes_sender.on_task_message(
+						result.ok_or(Fatal::SenderExhausted)?
+					)
+					.await;
+				}
+			}
+		}
+	}
+
+	/// Handle overseer signals.
+	async fn handle_signals<Context: SubsystemContext> (
+		&mut self,
+		ctx: &mut Context,
+		signal: OverseerSignal,
+	) -> Result<SignalResult>
+	{
+		match signal {
+			OverseerSignal::Conclude =>
+				return Ok(SignalResult::Conclude),
+			OverseerSignal::ActiveLeaves(update) => {
+				self.disputes_sender.update_leaves(
+					ctx,
+					&mut self.runtime,
+					update
+				)
+				.await?;
+			}
+			OverseerSignal::BlockFinalized(_,_) => {}
+		};
+		Ok(SignalResult::Continue)
+	}
+
+	/// Handle `DisputeDistributionMessage`s.
+	async fn handle_subsystem_message<Context: SubsystemContext> (
+		&mut self,
+		ctx: &mut Context,
+		msg: DisputeDistributionMessage
+	) -> Result<()>
+	{
+		match msg {
+			DisputeDistributionMessage::SendDispute(dispute_msg) =>
+				self.disputes_sender.start_sender(ctx, &mut self.runtime, dispute_msg).await?,
+			// This message will only arrive once:
+			DisputeDistributionMessage::DisputeSendingReceiver(receiver) => {
+				let receiver = DisputesReceiver::new(
+					ctx.sender().clone(),
+					receiver,
+					self.authority_discovery.clone(),
+					self.metrics.clone()
+				);
+
+				ctx
+					.spawn("disputes-receiver", receiver.run().boxed(),)
+					.map_err(Fatal::SpawnTask)?;
+			},
+		}
+		Ok(())
+	}
+}
+
+/// Messages to be handled in this subsystem.
+#[derive(Debug)]
+enum MuxedMessage {
+	/// Messages from other subsystems.
+	Subsystem(FatalResult<FromOverseer<DisputeDistributionMessage>>),
+	/// Messages from spawned sender background tasks.
+	Sender(Option<TaskFinish>),
+}
+
+impl MuxedMessage {
+	async fn receive(
+		ctx: &mut (impl SubsystemContext<Message = DisputeDistributionMessage> + overseer::SubsystemContext<Message = DisputeDistributionMessage>),
+		from_sender: &mut mpsc::Receiver<TaskFinish>,
+	) -> Self {
+		// We are only fusing here to make `select` happy; in reality we will quit if the stream
+		// ends.
+		let from_overseer = ctx.recv().fuse();
+		futures::pin_mut!(from_overseer, from_sender);
+		futures::select!(
+			msg = from_overseer => MuxedMessage::Subsystem(msg.map_err(Fatal::SubsystemReceive)),
+			msg = from_sender.next() => MuxedMessage::Sender(msg),
+		)
+	}
+}
+
+/// Result of handling signal from overseer.
+enum SignalResult {
+	/// Overseer asked us to conclude.
+	Conclude,
+	/// We can continue processing events.
+	Continue,
+}
diff --git a/polkadot/node/network/dispute-distribution/src/metrics.rs b/polkadot/node/network/dispute-distribution/src/metrics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..97611088c4dc6a8666e728d348e60009f91d2f32
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/metrics.rs
@@ -0,0 +1,109 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use polkadot_node_subsystem_util::metrics::prometheus::{Counter, U64, Registry, PrometheusError, CounterVec, Opts};
+use polkadot_node_subsystem_util::metrics::prometheus;
+use polkadot_node_subsystem_util::metrics;
+
+/// Label for success counters.
+pub const SUCCEEDED: &'static str = "succeeded";
+
+/// Label for fail counters.
+pub const FAILED: &'static str = "failed";
+
+/// Dispute Distribution metrics.
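+///
+/// All methods are no-ops on a dummy instance; only an instance registered via
+/// `metrics::Metrics::try_register` reports anything. Illustrative use:
+///
+/// ```ignore
+/// metrics.on_sent_request(SUCCEEDED);
+/// metrics.on_received_request();
+/// ```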
+#[derive(Clone, Default)]
+pub struct Metrics(Option<MetricsInner>);
+
+#[derive(Clone)]
+struct MetricsInner {
+	/// Number of sent dispute requests (succeeded and failed).
+	sent_requests: CounterVec<U64>,
+
+	/// Number of requests received.
+	///
+	/// This is all requests coming in, regardless of whether they are processed or dropped.
+	received_requests: Counter<U64>,
+
+	/// Number of requests for which `ImportStatements` returned.
+	///
+	/// This includes both successful and failed imports.
+	imported_requests: CounterVec<U64>,
+}
+
+impl Metrics {
+	/// Create new dummy metrics, not reporting anything.
+	pub fn new_dummy() -> Self {
+		Metrics(None)
+	}
+
+	/// Increment counter on finished request sending.
+	pub fn on_sent_request(&self, label: &'static str) {
+		if let Some(metrics) = &self.0 {
+			metrics.sent_requests.with_label_values(&[label]).inc()
+		}
+	}
+
+	/// Increment counter on received dispute requests.
+	pub fn on_received_request(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.received_requests.inc()
+		}
+	}
+
+	/// Statements have been imported.
+	pub fn on_imported(&self, label: &'static str) {
+		if let Some(metrics) = &self.0 {
+			metrics.imported_requests.with_label_values(&[label]).inc()
+		}
+	}
+}
+
+impl metrics::Metrics for Metrics {
+	fn try_register(registry: &Registry) -> Result<Self, PrometheusError> {
+		let metrics = MetricsInner {
+			sent_requests: prometheus::register(
+				CounterVec::new(
+					Opts::new(
+						"parachain_dispute_distribution_sent_requests",
+						"Total number of sent requests.",
+					),
+					&["success"]
+				)?,
+				registry,
+			)?,
+			received_requests: prometheus::register(
+				Counter::new(
+					"parachain_dispute_distribution_received_requests",
+					"Total number of received dispute requests.",
+				)?,
+				registry,
+			)?,
+			imported_requests: prometheus::register(
+				CounterVec::new(
+					Opts::new(
+						"parachain_dispute_distribution_imported_requests",
+						"Total number of imported requests.",
+					),
+					&["success"]
+				)?,
+				registry,
+			)?,
+		};
+		Ok(Metrics(Some(metrics)))
+	}
+}
+
diff --git a/polkadot/node/network/dispute-distribution/src/receiver/error.rs b/polkadot/node/network/dispute-distribution/src/receiver/error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ff0c799995b0a573ee2303c3bf8091b73f7ca994
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/receiver/error.rs
@@ -0,0 +1,110 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+//
+
+//! Error handling related code and Error/Result definitions.
+
+use thiserror::Error;
+
+use polkadot_node_network_protocol::PeerId;
+use polkadot_node_network_protocol::request_response::request::ReceiveError;
+use polkadot_node_subsystem_util::{Fault, runtime, unwrap_non_fatal};
+
+use crate::LOG_TARGET;
+
+#[derive(Debug, Error)]
+#[error(transparent)]
+pub struct Error(pub Fault<NonFatal, Fatal>);
+
+impl From<NonFatal> for Error {
+	fn from(e: NonFatal) -> Self {
+		Self(Fault::from_non_fatal(e))
+	}
+}
+
+impl From<Fatal> for Error {
+	fn from(f: Fatal) -> Self {
+		Self(Fault::from_fatal(f))
+	}
+}
+
+impl From<runtime::Error> for Error {
+	fn from(o: runtime::Error) -> Self {
+		Self(Fault::from_other(o))
+	}
+}
+
+/// Fatal errors of this subsystem.
+#[derive(Debug, Error)]
+pub enum Fatal {
+	/// Request channel returned `None`. Likely a system shutdown.
+	#[error("Request channel stream finished.")]
+	RequestChannelFinished,
+
+	/// Errors coming from runtime::Runtime.
+	#[error("Error while accessing runtime information")]
+	Runtime(#[from] #[source] runtime::Fatal),
+}
+
+/// Non-fatal errors of this subsystem.
+#[derive(Debug, Error)]
+pub enum NonFatal {
+	/// Answering request failed.
+	#[error("Sending back response to peer {0} failed.")]
+	SendResponse(PeerId),
+
+	/// Getting request from raw request failed.
+	#[error("Decoding request failed.")]
+	FromRawRequest(#[source] ReceiveError),
+
+	/// Setting reputation for peer failed.
+	#[error("Changing peer's ({0}) reputation failed.")]
+	SetPeerReputation(PeerId),
+
+	/// Peer sent us request with invalid signature.
+	#[error("Dispute request with invalid signatures, from peer {0}.")]
+	InvalidSignature(PeerId),
+
+	/// Import oneshot got canceled.
+	#[error("Import of dispute got canceled for peer {0} - import failed for some reason.")]
+	ImportCanceled(PeerId),
+
+	/// Non validator tried to participate in dispute.
+	#[error("Peer {0} is not a validator.")]
+	NotAValidator(PeerId),
+
+	/// Errors coming from runtime::Runtime.
+	#[error("Error while accessing runtime information")]
+	Runtime(#[from] #[source] runtime::NonFatal),
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+pub type FatalResult<T> = std::result::Result<T, Fatal>;
+pub type NonFatalResult<T> = std::result::Result<T, NonFatal>;
+
+/// Utility for consuming top-level errors and logging them.
+///
+/// We basically always want to try to continue on error. This utility function consumes
+/// top-level non-fatal errors by simply logging them, while fatal errors are passed on.
+pub fn log_error(result: Result<()>)
+	-> std::result::Result<(), Fatal>
+{
+	if let Some(error) = unwrap_non_fatal(result.map_err(|e| e.0))? {
+		tracing::warn!(target: LOG_TARGET, error = ?error);
+	}
+	Ok(())
+}
diff --git a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..83ca4ba3077db555fd56b3dbe3001ef2a1ae431c
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
@@ -0,0 +1,429 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+
+use std::collections::HashSet;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+use futures::FutureExt;
+use futures::Stream;
+use futures::future::{BoxFuture, poll_fn};
+use futures::stream::FusedStream;
+use lru::LruCache;
+use futures::{channel::mpsc, channel::oneshot, stream::StreamExt, stream::FuturesUnordered};
+
+use polkadot_node_network_protocol::{
+	PeerId,
+	UnifiedReputationChange as Rep,
+	authority_discovery::AuthorityDiscovery,
+	request_response::{
+		IncomingRequest,
+		request::OutgoingResponse,
+		request::OutgoingResponseSender,
+		v1::DisputeRequest,
+		v1::DisputeResponse,
+	},
+};
+use polkadot_node_primitives::DISPUTE_WINDOW;
+use polkadot_node_subsystem_util::{
+	runtime,
+	runtime::RuntimeInfo,
+};
+use polkadot_subsystem::{
+	SubsystemSender,
+	messages::{
+		AllMessages, DisputeCoordinatorMessage, ImportStatementsResult,
+	},
+};
+
+use crate::metrics::{FAILED, SUCCEEDED};
+use crate::{LOG_TARGET, Metrics};
+
+mod error;
+use self::error::{log_error, FatalResult, NonFatalResult, NonFatal, Fatal, Result};
+
+const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Received message could not be decoded.");
+const COST_INVALID_SIGNATURE: Rep = Rep::Malicious("Signatures were invalid.");
+const COST_INVALID_CANDIDATE: Rep = Rep::Malicious("Reported candidate was not available.");
+const COST_NOT_A_VALIDATOR: Rep = Rep::CostMajor("Reporting peer was not a validator.");
+
+/// How many statement imports we want to issue in parallel:
+pub const MAX_PARALLEL_IMPORTS: usize = 10;
+
+/// State for handling incoming `DisputeRequest` messages.
+///
+/// This is supposed to run as its own task in order to easily impose back pressure on the incoming
+/// request channel and at the same time to drop flood messages as fast as possible.
+pub struct DisputesReceiver<Sender, AD> {
+	/// Access to session information.
+	runtime: RuntimeInfo,
+
+	/// Subsystem sender for communication with other subsystems.
+	sender: Sender,
+
+	/// Channel to retrieve incoming requests from.
+	receiver: mpsc::Receiver<sc_network::config::IncomingRequest>,
+
+	/// Authority discovery service:
+	authority_discovery: AD,
+
+	/// Imports currently being processed.
+	pending_imports: PendingImports,
+
+	/// We keep a record of the recently banned peers.
+	///
+	/// This is needed because once we ban a peer, we will very likely still have pending requests
+	/// from it in the incoming channel - we should not waste time processing those, as we already
+	/// know the peer is malicious.
+	banned_peers: LruCache<PeerId, ()>,
+
+	/// Metrics, e.g. for counting received requests.
+	metrics: Metrics,
+}
+
+/// Messages as handled by this receiver internally.
+enum MuxedMessage {
+	/// An import got confirmed by the coordinator.
+	///
+	/// We need to handle those for two reasons:
+	///
+	/// - We need to make sure responses are actually sent (therefore we need to await futures
+	/// promptly).
+	/// - We need to update `banned_peers` according to the result.
+	ConfirmedImport(NonFatalResult<(PeerId, ImportStatementsResult)>),
+
+	/// A new request has arrived and should be handled.
+	NewRequest(sc_network::config::IncomingRequest),
+}
+
+impl MuxedMessage {
+	async fn receive(
+		pending_imports: &mut PendingImports,
+		pending_requests: &mut mpsc::Receiver<sc_network::config::IncomingRequest>,
+	) -> FatalResult<MuxedMessage> {
+		poll_fn(|ctx| {
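+			// Give priority to new requests; pending import results are only drained when no new
+			// request is ready to be processed.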
+			if let Poll::Ready(v) = pending_requests.poll_next_unpin(ctx) {
+				let r = match v {
+					None => Err(Fatal::RequestChannelFinished),
+					Some(msg) => Ok(MuxedMessage::NewRequest(msg)),
+				};
+				return Poll::Ready(r)
+			}
+			// In case of Ready(None) return `Pending` below - we want to wait for the next request
+			// in that case.
+			if let Poll::Ready(Some(v)) = pending_imports.poll_next_unpin(ctx) {
+				return Poll::Ready(Ok(MuxedMessage::ConfirmedImport(v)))
+			}
+			Poll::Pending
+		}).await
+	}
+}
+
+impl<Sender: SubsystemSender, AD> DisputesReceiver<Sender, AD>
+where
+	AD: AuthorityDiscovery,
+{
+	/// Create a new receiver which can be `run`.
+	pub fn new(
+		sender: Sender,
+		receiver: mpsc::Receiver<sc_network::config::IncomingRequest>,
+		authority_discovery: AD,
+		metrics: Metrics,
+	) -> Self {
+		let runtime = RuntimeInfo::new_with_config(runtime::Config {
+			keystore: None,
+			session_cache_lru_size: DISPUTE_WINDOW as usize,
+		});
+		Self {
+			runtime,
+			sender,
+			receiver,
+			authority_discovery,
+			pending_imports: PendingImports::new(),
+			// Size of MAX_PARALLEL_IMPORTS ensures we are going to immediately get rid of any
+			// malicious requests still pending in the incoming queue.
+			banned_peers: LruCache::new(MAX_PARALLEL_IMPORTS),
+			metrics,
+		}
+	}
+
+	/// Get that receiver started.
+	///
+	/// This is an endless loop and should be spawned into its own task.
+	pub async fn run(mut self) {
+		loop {
+			match log_error(self.run_inner().await) {
+				Ok(()) => {}
+				Err(Fatal::RequestChannelFinished) => {
+					tracing::debug!(
+						target: LOG_TARGET,
+						"Incoming request stream exhausted - shutting down?"
+					);
+					return
+				}
+				Err(err) => {
+					tracing::warn!(
+						target: LOG_TARGET,
+						?err,
+						"Dispute receiver died."
+					);
+					return
+				}
+			}
+		}
+	}
+
+	/// Actual work happening here.
+	async fn run_inner(&mut self) -> Result<()> {
+
+		let msg = MuxedMessage::receive(
+			&mut self.pending_imports,
+			&mut self.receiver
+		)
+		.await?;
+
+		let raw = match msg {
+			// We need to clean up futures, to make sure responses are sent:
+			MuxedMessage::ConfirmedImport(m_bad) => {
+				self.ban_bad_peer(m_bad)?;
+				return Ok(())
+			}
+			MuxedMessage::NewRequest(req) => req,
+		};
+
+		self.metrics.on_received_request();
+
+		let peer = raw.peer;
+
+		// Only accept messages from validators:
+		if self.authority_discovery.get_authority_id_by_peer_id(raw.peer).await.is_none() {
+			raw.pending_response.send(
+				sc_network::config::OutgoingResponse {
+					result: Err(()),
+					reputation_changes: vec![COST_NOT_A_VALIDATOR.into_base_rep()],
+					sent_feedback: None,
+				}
+			)
+			.map_err(|_| NonFatal::SendResponse(peer))?;
+
+			return Err(NonFatal::NotAValidator(peer).into())
+		}
+
+		let incoming = IncomingRequest::<DisputeRequest>::try_from_raw(
+			raw,
+			vec![COST_INVALID_REQUEST]
+		)
+		.map_err(NonFatal::FromRawRequest)?;
+
+		// Immediately drop requests from peers that already have requests in flight or have
+		// been banned recently (flood protection):
+		if self.pending_imports.peer_is_pending(&peer) || self.banned_peers.contains(&peer) {
+			tracing::trace!(
+				target: LOG_TARGET,
+				?peer,
+				"Dropping message from peer (banned/pending import)"
+			);
+			return Ok(())
+		}
+
+		// Wait for a free slot:
+		if self.pending_imports.len() >= MAX_PARALLEL_IMPORTS {
+			// Wait for one to finish:
+			let r = self.pending_imports.next().await;
+			self.ban_bad_peer(r.expect("pending_imports.len() is greater than 0. qed."))?;
+		}
+
+		// All good - initiate import.
+		self.start_import(incoming).await
+	}
+
+	/// Start importing votes for the given request.
+	async fn start_import(
+		&mut self,
+		incoming: IncomingRequest<DisputeRequest>,
+	) -> Result<()> {
+
+		let IncomingRequest {
+			peer, payload, pending_response,
+		} = incoming;
+
+		let info = self.runtime.get_session_info_by_index(
+			&mut self.sender,
+			payload.0.candidate_receipt.descriptor.relay_parent,
+			payload.0.session_index
+		)
+		.await?;
+
+		let votes_result = payload.0.try_into_signed_votes(&info.session_info);
+
+		let (candidate_receipt, valid_vote, invalid_vote) = match votes_result {
+			Err(()) => { // Signature invalid:
+				pending_response.send_outgoing_response(
+					OutgoingResponse {
+						result: Err(()),
+						reputation_changes: vec![COST_INVALID_SIGNATURE],
+						sent_feedback: None,
+					}
+				)
+				.map_err(|_| NonFatal::SetPeerReputation(peer))?;
+
+				return Err(From::from(NonFatal::InvalidSignature(peer)))
+			}
+			Ok(votes) => votes,
+		};
+
+		let (pending_confirmation, confirmation_rx) = oneshot::channel();
+		let candidate_hash = candidate_receipt.hash();
+		self.sender.send_message(
+			AllMessages::DisputeCoordinator(
+				DisputeCoordinatorMessage::ImportStatements {
+					candidate_hash,
+					candidate_receipt,
+					session: valid_vote.0.session_index(),
+					statements: vec![valid_vote, invalid_vote],
+					pending_confirmation,
+				}
+			)
+		)
+		.await;
+
+		self.pending_imports.push(peer, confirmation_rx, pending_response);
+		Ok(())
+	}
+
+	/// Process the result of an import and ban any misbehaving peers.
+	///
+	/// In addition we report import metrics.
+	fn ban_bad_peer(
+		&mut self,
+		result: NonFatalResult<(PeerId, ImportStatementsResult)>
+	) -> NonFatalResult<()> {
+		match result? {
+			(_, ImportStatementsResult::ValidImport) => {
+				self.metrics.on_imported(SUCCEEDED);
+			}
+			(bad_peer, ImportStatementsResult::InvalidImport) => {
+				self.metrics.on_imported(FAILED);
+				self.banned_peers.put(bad_peer, ());
+			}
+		}
+		Ok(())
+	}
+}
+
+/// Manage pending imports, preserving the invariant that `peers` always contains exactly the
+/// peers whose imports are currently in flight.
+struct PendingImports {
+	/// Futures in flight.
+	futures: FuturesUnordered<BoxFuture<'static, (PeerId, NonFatalResult<ImportStatementsResult>)>>,
+	/// Peers whose requests are currently in flight.
+	peers: HashSet<PeerId>,
+}
+
+impl PendingImports {
+	pub fn new() -> Self {
+		Self {
+			futures: FuturesUnordered::new(),
+			peers: HashSet::new(),
+		}
+	}
+
+	pub fn push(
+		&mut self,
+		peer: PeerId,
+		handled: oneshot::Receiver<ImportStatementsResult>,
+		pending_response: OutgoingResponseSender<DisputeRequest>
+	) {
+		self.peers.insert(peer);
+		self.futures.push(
+			async move {
+				let r = respond_to_request(peer, handled, pending_response).await;
+				(peer, r)
+			}.boxed()
+		)
+	}
+
+	/// Returns the number of contained futures.
+	pub fn len(&self) -> usize {
+		self.futures.len()
+	}
+
+	/// Check whether a peer has a pending import.
+	pub fn peer_is_pending(&self, peer: &PeerId) -> bool {
+		self.peers.contains(peer)
+	}
+}
+
+impl Stream for PendingImports {
+	type Item = NonFatalResult<(PeerId, ImportStatementsResult)>;
+	fn poll_next(
+		mut self: Pin<&mut Self>,
+		ctx: &mut Context<'_>
+	) -> Poll<Option<Self::Item>> {
+		match Pin::new(&mut self.futures).poll_next(ctx) {
+			Poll::Pending => Poll::Pending,
+			Poll::Ready(None) => Poll::Ready(None),
+			Poll::Ready(Some((peer, result))) => {
+				self.peers.remove(&peer);
+				Poll::Ready(Some(result.map(|r| (peer,r))))
+			}
+		}
+	}
+}
+
+impl FusedStream for PendingImports {
+	fn is_terminated(&self) -> bool {
+		self.futures.is_terminated()
+	}
+}
+
+// Future for `PendingImports`
+//
+// - Wait for import
+// - Punish peer
+// - Deliver result
+async fn respond_to_request(
+	peer: PeerId,
+	handled: oneshot::Receiver<ImportStatementsResult>,
+	pending_response: OutgoingResponseSender<DisputeRequest>
+) -> NonFatalResult<ImportStatementsResult> {
+
+	let result = handled
+		.await
+		.map_err(|_| NonFatal::ImportCanceled(peer))?;
+
+	let response = match result {
+		ImportStatementsResult::ValidImport =>
+			OutgoingResponse {
+				result: Ok(DisputeResponse::Confirmed),
+				reputation_changes: Vec::new(),
+				sent_feedback: None,
+			},
+		ImportStatementsResult::InvalidImport =>
+			OutgoingResponse {
+				result: Err(()),
+				reputation_changes: vec![COST_INVALID_CANDIDATE],
+				sent_feedback: None,
+			},
+	};
+
+	pending_response
+		.send_outgoing_response(response)
+		.map_err(|_| NonFatal::SendResponse(peer))?;
+
+	Ok(result)
+}
diff --git a/polkadot/node/network/dispute-distribution/src/sender/error.rs b/polkadot/node/network/dispute-distribution/src/sender/error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..31dd7bdcdb41b683d8d509e2699772a185d5aafa
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/sender/error.rs
@@ -0,0 +1,107 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+//
+
+//! Error handling related code and Error/Result definitions.
+
+use thiserror::Error;
+
+
+use polkadot_node_subsystem_util::{Fault, runtime};
+use polkadot_subsystem::SubsystemError;
+use polkadot_node_primitives::disputes::DisputeMessageCheckError;
+
+
+#[derive(Debug, Error)]
+#[error(transparent)]
+pub struct Error(pub Fault<NonFatal, Fatal>);
+
+impl From<NonFatal> for Error {
+	fn from(e: NonFatal) -> Self {
+		Self(Fault::from_non_fatal(e))
+	}
+}
+
+impl From<Fatal> for Error {
+	fn from(f: Fatal) -> Self {
+		Self(Fault::from_fatal(f))
+	}
+}
+
+impl From<runtime::Error> for Error {
+	fn from(o: runtime::Error) -> Self {
+		Self(Fault::from_other(o))
+	}
+}
+
+/// Fatal errors of this subsystem.
+#[derive(Debug, Error)]
+pub enum Fatal {
+	/// Spawning a running task failed.
+	#[error("Spawning subsystem task failed")]
+	SpawnTask(#[source] SubsystemError),
+
+	/// Errors coming from runtime::Runtime.
+	#[error("Error while accessing runtime information")]
+	Runtime(#[from] #[source] runtime::Fatal),
+}
+
+/// Non-fatal errors of this subsystem.
+#[derive(Debug, Error)]
+pub enum NonFatal {
+	/// We need available active heads for finding relevant authorities.
+	#[error("No active heads available - needed for finding relevant authorities.")]
+	NoActiveHeads,
+
+	/// This error likely indicates a bug in the coordinator.
+	#[error("Oneshot for asking dispute coordinator for active disputes got canceled.")]
+	AskActiveDisputesCanceled,
+
+	/// This error likely indicates a bug in the coordinator.
+	#[error("Oneshot for asking dispute coordinator for candidate votes got canceled.")]
+	AskCandidateVotesCanceled,
+
+	/// This error does indicate a bug in the coordinator.
+	///
+	/// We were not able to successfully construct a `DisputeMessage` from dispute votes.
+	#[error("Invalid dispute encountered")]
+	InvalidDisputeFromCoordinator(#[source] DisputeMessageCheckError),
+
+	/// This error does indicate a bug in the coordinator.
+	///
+	/// We did not receive votes on both sides for `CandidateVotes` received from the coordinator.
+	#[error("Missing votes for valid dispute")]
+	MissingVotesFromCoordinator,
+
+	/// This error does indicate a bug in the coordinator.
+	///
+	/// `SignedDisputeStatement` could not be reconstructed from recorded statements.
+	#[error("Invalid statements from coordinator")]
+	InvalidStatementFromCoordinator,
+
+	/// This error does indicate a bug in the coordinator.
+	///
+	/// A statement's `ValidatorIndex` could not be looked up.
+	#[error("ValidatorIndex of statement could not be found")]
+	InvalidValidatorIndexFromCoordinator,
+
+	/// Errors coming from runtime::Runtime.
+	#[error("Error while accessing runtime information")]
+	Runtime(#[from] #[source] runtime::NonFatal),
+}
+
+pub type Result<T> = std::result::Result<T, Error>;
+pub type NonFatalResult<T> = std::result::Result<T, NonFatal>;
diff --git a/polkadot/node/network/dispute-distribution/src/sender/mod.rs b/polkadot/node/network/dispute-distribution/src/sender/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9b21e88bbf91008ba28eabc3a6491a48fe5cd36f
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/sender/mod.rs
@@ -0,0 +1,362 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+
+use std::collections::{HashMap, HashSet, hash_map::Entry};
+
+use futures::channel::{mpsc, oneshot};
+
+use polkadot_node_network_protocol::request_response::v1::DisputeRequest;
+use polkadot_node_primitives::{CandidateVotes, DisputeMessage, SignedDisputeStatement};
+use polkadot_node_subsystem_util::runtime::RuntimeInfo;
+use polkadot_primitives::v1::{CandidateHash, DisputeStatement, Hash, SessionIndex};
+use polkadot_subsystem::{
+	ActiveLeavesUpdate, SubsystemContext,
+	messages::{AllMessages, DisputeCoordinatorMessage}
+};
+
+
+/// For each ongoing dispute we have a `SendTask` which takes care of it.
+///
+/// It is going to spawn real tasks as it sees fit for getting the votes of the particular dispute
+/// out.
+mod send_task;
+use send_task::SendTask;
+pub use send_task::TaskFinish;
+
+/// Error and [`Result`] type for the sender.
+mod error;
+pub use error::{Result, Error, Fatal, NonFatal};
+
+use crate::{LOG_TARGET, Metrics};
+use self::error::NonFatalResult;
+
+/// The `DisputeSender` keeps track of all ongoing disputes for which we need to send out our
+/// statements.
+///
+/// For each dispute, a `SendTask` is responsible for sending our statements to the validators
+/// concerned with that particular dispute. The `DisputeSender` keeps track of those tasks,
+/// informs them about new sessions/validator sets and cleans them up when they become obsolete.
+pub struct DisputeSender {
+	/// All heads we currently consider active.
+	active_heads: Vec<Hash>,
+
+	/// List of currently active sessions.
+	///
+	/// Value is the hash that was used for the query.
+	active_sessions: HashMap<SessionIndex, Hash>,
+
+	/// All ongoing dispute sendings this subsystem is aware of.
+	disputes: HashMap<CandidateHash, SendTask>,
+
+	/// Sender to be cloned for `SendTask`s.
+	tx: mpsc::Sender<TaskFinish>,
+
+	/// Metrics for reporting stats about sent requests.
+	metrics: Metrics,
+}
+
+impl DisputeSender
+{
+	/// Create a new `DisputeSender` which can be used to start dispute sendings.
+	pub fn new(tx: mpsc::Sender<TaskFinish>, metrics: Metrics) -> Self {
+		Self {
+			active_heads: Vec::new(),
+			active_sessions: HashMap::new(),
+			disputes: HashMap::new(),
+			tx,
+			metrics,
+		}
+	}
+
+	/// Create a `SendTask` for a particular new dispute.
+	pub async fn start_sender<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		runtime: &mut RuntimeInfo,
+		msg: DisputeMessage,
+	) -> Result<()> {
+		let req: DisputeRequest = msg.into();
+		let candidate_hash = req.0.candidate_receipt.hash();
+		match self.disputes.entry(candidate_hash) {
+			Entry::Occupied(_) => {
+				tracing::trace!(
+					target: LOG_TARGET,
+					?candidate_hash,
+					"Dispute sending already active."
+				);
+				return Ok(())
+			}
+			Entry::Vacant(vacant) => {
+				let send_task = SendTask::new(
+					ctx,
+					runtime,
+					&self.active_sessions,
+					self.tx.clone(),
+					req,
+				)
+				.await?;
+				vacant.insert(send_task);
+			}
+		}
+		Ok(())
+	}
+
+	/// Take care of a change in active leaves.
+	///
+	/// - Initiate a retry of failed sends which are still active.
+	/// - Get new authorities to send messages to.
+	/// - Get rid of obsolete tasks and disputes.
+	/// - Get dispute sending started in case we missed one for some reason (e.g. on node startup).
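+	///
+	/// These steps map to the function body below: refresh sessions, query active disputes,
+	/// prune obsolete `SendTask`s, refresh sends where needed and finally start sending for
+	/// disputes we were not yet aware of.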
+	pub async fn update_leaves<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		runtime: &mut RuntimeInfo,
+		update: ActiveLeavesUpdate,
+	) -> Result<()> {
+		let ActiveLeavesUpdate { activated, deactivated } = update;
+		let deactivated: HashSet<_> = deactivated.into_iter().collect();
+		self.active_heads.retain(|h| !deactivated.contains(h));
+		self.active_heads.extend(activated.into_iter().map(|l| l.hash));
+
+		let have_new_sessions = self.refresh_sessions(ctx, runtime).await?;
+
+		let active_disputes = get_active_disputes(ctx).await?;
+		let unknown_disputes = {
+			let mut disputes = active_disputes.clone();
+			disputes.retain(|(_, c)| !self.disputes.contains_key(c));
+			disputes
+		};
+
+		let active_disputes: HashSet<_> = active_disputes.into_iter().map(|(_, c)| c).collect();
+
+		// Cleanup obsolete senders:
+		self.disputes.retain(
+			|candidate_hash, _| active_disputes.contains(candidate_hash)
+		);
+
+		for dispute in self.disputes.values_mut() {
+			if have_new_sessions || dispute.has_failed_sends() {
+				dispute.refresh_sends(ctx, runtime, &self.active_sessions).await?;
+			}
+		}
+
+		// This should only be non-empty on startup, but if not - we got you covered:
+		for dispute in unknown_disputes {
+			self.start_send_for_dispute(ctx, runtime, dispute).await?
+		}
+		Ok(())
+	}
+
+	/// Receive message from a sending task.
+	pub async fn on_task_message(&mut self, msg: TaskFinish) {
+
+		let TaskFinish { candidate_hash, receiver, result } = msg;
+
+		self.metrics.on_sent_request(result.as_metrics_label());
+
+		let task = match self.disputes.get_mut(&candidate_hash) {
+			None => {
+				// Can happen when a dispute ends, with messages still in queue:
+				tracing::trace!(
+					target: LOG_TARGET,
+					?result,
+					"Received `FromSendingTask::Finished` for non existing dispute."
+				);
+				return
+			}
+			Some(task) => task,
+		};
+		task.on_finished_send(&receiver, result);
+	}
+
+	/// Call `start_sender` for a dispute as received from the coordinator.
+	///
+	/// Recover the necessary votes for building up the `DisputeMessage` and start sending.
+	async fn start_send_for_dispute<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		runtime: &mut RuntimeInfo,
+		dispute: (SessionIndex, CandidateHash),
+	) -> Result<()> {
+		let (session_index, candidate_hash) = dispute;
+		// We need some relay chain head for context when fetching session information:
+		let ref_head = self.active_sessions.values().next().ok_or(NonFatal::NoActiveHeads)?;
+		let info = runtime.get_session_info_by_index(ctx.sender(), *ref_head, session_index).await?;
+		let our_index = match info.validator_info.our_index {
+			None => {
+				tracing::trace!(
+					target: LOG_TARGET,
+					"Not a validator in that session - not starting dispute sending."
+				);
+				return Ok(())
+			}
+			Some(index) => index,
+		};
+
+		let votes = match get_candidate_votes(ctx, session_index, candidate_hash).await? {
+			None => {
+				tracing::debug!(
+					target: LOG_TARGET,
+					?session_index,
+					?candidate_hash,
+					"No votes for active dispute?! - possible, due to race."
+				);
+				return Ok(())
+			}
+			Some(votes) => votes,
+		};
+
+		let our_valid_vote = votes
+			.valid
+			.iter()
+			.find(|(_, i, _)| *i == our_index);
+
+		let our_invalid_vote = votes
+			.invalid
+			.iter()
+			.find(|(_, i, _)| *i == our_index);
+
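+		// Pair our own vote with one opposing vote, so the resulting `DisputeMessage` always
+		// carries one valid and one invalid statement: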
+		let (valid_vote, invalid_vote) =
+			if let Some(our_valid_vote) = our_valid_vote {
+				// Get some invalid vote as well:
+				let invalid_vote = votes
+					.invalid
+					.get(0)
+					.ok_or(NonFatal::MissingVotesFromCoordinator)?;
+				(our_valid_vote, invalid_vote)
+			} else if let Some(our_invalid_vote) = our_invalid_vote {
+				// Get some valid vote as well:
+				let valid_vote = votes
+					.valid
+					.get(0)
+					.ok_or(NonFatal::MissingVotesFromCoordinator)?;
+				(valid_vote, our_invalid_vote)
+			} else {
+				return Err(From::from(NonFatal::MissingVotesFromCoordinator))
+			}
+		;
+		let (kind, valid_index, signature) = valid_vote;
+		let valid_public = info
+			.session_info
+			.validators
+			.get(valid_index.0 as usize)
+			.ok_or(NonFatal::InvalidStatementFromCoordinator)?;
+		let valid_signed = SignedDisputeStatement::new_checked(
+			DisputeStatement::Valid(kind.clone()),
+			candidate_hash,
+			session_index,
+			valid_public.clone(),
+			signature.clone(),
+		)
+		.map_err(|()| NonFatal::InvalidStatementFromCoordinator)?;
+
+		let (kind, invalid_index, signature) = invalid_vote;
+		let invalid_public = info
+			.session_info
+			.validators
+			.get(invalid_index.0 as usize)
+			.ok_or(NonFatal::InvalidValidatorIndexFromCoordinator)?;
+		let invalid_signed = SignedDisputeStatement::new_checked(
+			DisputeStatement::Invalid(kind.clone()),
+			candidate_hash,
+			session_index,
+			invalid_public.clone(),
+			signature.clone(),
+		)
+		.map_err(|()| NonFatal::InvalidValidatorIndexFromCoordinator)?;
+
+		// Reconstructing the checked signed dispute statements is hardly useful here and wasteful,
+		// but I don't want to enable a bypass for the below smart constructor and this code path
+		// is only supposed to be hit on startup, basically.
+		//
+		// Revisit this decision when `from_signed_statements` is unneeded for the normal code
+		// path as well.
+		let message = DisputeMessage::from_signed_statements(
+			valid_signed,
+			*valid_index,
+			invalid_signed,
+			*invalid_index,
+			votes.candidate_receipt,
+			&info.session_info
+		)
+		.map_err(NonFatal::InvalidDisputeFromCoordinator)?;
+
+		// Finally, get the party started:
+		self.start_sender(ctx, runtime, message).await
+	}
+
+	/// Make active sessions correspond to currently active heads.
+	///
+	/// Returns: true if sessions changed.
+	async fn refresh_sessions<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		runtime: &mut RuntimeInfo,
+	) -> Result<bool> {
+		let new_sessions = get_active_session_indices(ctx, runtime, &self.active_heads).await?;
+		let new_sessions_raw: HashSet<_> = new_sessions.keys().collect();
+		let old_sessions_raw: HashSet<_> = self.active_sessions.keys().collect();
+		let updated = new_sessions_raw != old_sessions_raw;
+		// Update in any case, so we use current heads for queries:
+		self.active_sessions = new_sessions;
+		Ok(updated)
+	}
+}
+
+/// Retrieve the currently active sessions.
+///
+/// Returns all indices of active sessions, together with the head that was used for the query.
+async fn get_active_session_indices<Context: SubsystemContext>(
+	ctx: &mut Context,
+	runtime: &mut RuntimeInfo,
+	active_heads: &[Hash],
+) -> Result<HashMap<SessionIndex, Hash>> {
+	let mut indices = HashMap::new();
+	for head in active_heads {
+		let session_index = runtime.get_session_index(ctx.sender(), *head).await?;
+		indices.insert(session_index, *head);
+	}
+	Ok(indices)
+}
+
+/// Retrieve the set of active disputes from the dispute coordinator.
+async fn get_active_disputes<Context: SubsystemContext>(ctx: &mut Context)
+	-> NonFatalResult<Vec<(SessionIndex, CandidateHash)>> {
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(AllMessages::DisputeCoordinator(
+			DisputeCoordinatorMessage::ActiveDisputes(tx)
+	))
+	.await;
+	rx.await.map_err(|_| NonFatal::AskActiveDisputesCanceled)
+}
+
+/// Get all locally available dispute votes for a given dispute.
+async fn get_candidate_votes<Context: SubsystemContext>(
+	ctx: &mut Context,
+	session_index: SessionIndex,
+	candidate_hash: CandidateHash,
+) -> NonFatalResult<Option<CandidateVotes>> {
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(AllMessages::DisputeCoordinator(
+		DisputeCoordinatorMessage::QueryCandidateVotes(
+			session_index,
+			candidate_hash,
+			tx
+		)
+	))
+	.await;
+	rx.await.map_err(|_| NonFatal::AskCandidateVotesCanceled)
+}
diff --git a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6d4745365f0ee6851d7d8a7e1832f3523968ca2a
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs
@@ -0,0 +1,328 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+
+use std::collections::HashMap;
+use std::collections::HashSet;
+
+use futures::Future;
+use futures::FutureExt;
+use futures::SinkExt;
+use futures::channel::mpsc;
+use futures::future::RemoteHandle;
+
+use polkadot_node_network_protocol::{
+	IfDisconnected,
+	request_response::{
+		OutgoingRequest, OutgoingResult, Recipient, Requests,
+		v1::{DisputeRequest, DisputeResponse},
+	}
+};
+use polkadot_node_subsystem_util::runtime::RuntimeInfo;
+use polkadot_primitives::v1::{
+	AuthorityDiscoveryId, CandidateHash, Hash, SessionIndex, ValidatorIndex,
+};
+use polkadot_subsystem::{
+	SubsystemContext,
+	messages::{AllMessages, NetworkBridgeMessage},
+};
+
+use super::error::{Fatal, Result};
+
+use crate::LOG_TARGET;
+use crate::metrics::FAILED;
+use crate::metrics::SUCCEEDED;
+
+/// Delivery status for a particular dispute.
+///
+/// Keeps track of all the validators that have to be reached for a dispute.
+pub struct SendTask {
+	/// The request we are supposed to get out to all parachain validators of the dispute's session
+	/// and to all current authorities.
+	request: DisputeRequest,
+
+	/// The set of authorities we need to send our messages to. This set will change at session
+	/// boundaries. It will always be at least the parachain validators of the session where the
+	/// dispute happened and the authorities of the current sessions as determined by active heads.
+	deliveries: HashMap<AuthorityDiscoveryId, DeliveryStatus>,
+
+	/// Whether any sends have failed since the last refresh.
+	has_failed_sends: bool,
+
+	/// Sender to be cloned for tasks.
+	tx: mpsc::Sender<TaskFinish>,
+}
+
+/// Status of a particular vote/statement delivery to a particular validator.
+enum DeliveryStatus {
+	/// Request is still in flight.
+	Pending(RemoteHandle<()>),
+	/// Succeeded - no need to send request to this peer anymore.
+	Succeeded,
+}
+
+/// A sending task finishes with this result:
+#[derive(Debug)]
+pub struct TaskFinish {
+	/// The candidate this task was running for.
+	pub candidate_hash: CandidateHash,
+	/// The authority the request was sent to.
+	pub receiver: AuthorityDiscoveryId,
+	/// The result of the delivery attempt.
+	pub result: TaskResult,
+}
+
+#[derive(Debug)]
+pub enum TaskResult {
+	/// Task succeeded in getting the request to its peer.
+	Succeeded,
+	/// Task was not able to get the request out to its peer.
+	///
+	/// It should be retried in that case.
+	Failed,
+}
+
+impl TaskResult {
+	pub fn as_metrics_label(&self) -> &'static str {
+		match self {
+			Self::Succeeded => SUCCEEDED,
+			Self::Failed => FAILED,
+		}
+	}
+}
+
+impl SendTask
+{
+	/// Initiates sending a dispute message to peers.
+	pub async fn new<Context: SubsystemContext>(
+		ctx: &mut Context,
+		runtime: &mut RuntimeInfo,
+		active_sessions: &HashMap<SessionIndex,Hash>,
+		tx: mpsc::Sender<TaskFinish>,
+		request: DisputeRequest,
+	) -> Result<Self> {
+		let mut send_task = Self {
+			request,
+			deliveries: HashMap::new(),
+			has_failed_sends: false,
+			tx,
+		};
+		send_task.refresh_sends(
+			ctx,
+			runtime,
+			active_sessions,
+		).await?;
+		Ok(send_task)
+	}
+
+	/// Make sure we are sending to all relevant authorities.
+	///
+	/// This function is called at construction and should also be called whenever a session change
+	/// happens and on a regular basis to ensure we are retrying failed attempts.
+	pub async fn refresh_sends<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		runtime: &mut RuntimeInfo,
+		active_sessions: &HashMap<SessionIndex, Hash>,
+	) -> Result<()> {
+		let new_authorities = self.get_relevant_validators(ctx, runtime, active_sessions).await?;
+
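+		// Only authorities we have not yet sent to need a new task (failed attempts have been
+		// removed from `deliveries` and will therefore be retried here):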
+		let add_authorities = new_authorities
+			.iter()
+			.filter(|a| !self.deliveries.contains_key(a))
+			.map(Clone::clone)
+			.collect();
+
+		// Get rid of dead/irrelevant tasks/statuses:
+		self.deliveries.retain(|k, _| new_authorities.contains(k));
+
+		// Start any new tasks that are needed:
+		let new_statuses = send_requests(
+			ctx,
+			self.tx.clone(),
+			add_authorities,
+			self.request.clone(),
+		).await?;
+
+		self.deliveries.extend(new_statuses.into_iter());
+		self.has_failed_sends = false;
+		Ok(())
+	}
+
+	/// Whether any sends have failed since the last refresh.
+	pub fn has_failed_sends(&self) -> bool {
+		self.has_failed_sends
+	}
+
+	/// Handle a finished response waiting task.
+	pub fn on_finished_send(&mut self, authority: &AuthorityDiscoveryId, result: TaskResult) {
+		match result {
+			TaskResult::Failed => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					candidate = ?self.request.0.candidate_receipt.hash(),
+					?authority,
+					"Could not get our message out! If this keeps happening, then check chain whether the dispute made it there."
+				);
+				self.has_failed_sends = true;
+				// Remove state, so we know what to try again:
+				self.deliveries.remove(authority);
+			}
+			TaskResult::Succeeded => {
+				let status = match self.deliveries.get_mut(authority) {
+					None => {
+						// Can happen when a sending became irrelevant while the response was already
+						// queued.
+						tracing::debug!(
+							target: LOG_TARGET,
+							candidate = ?self.request.0.candidate_receipt.hash(),
+							?authority,
+							?result,
+							"Received `FromSendingTask::Finished` for non existing task."
+						);
+						return
+					}
+					Some(status) => status,
+				};
+				// We are done here:
+				*status = DeliveryStatus::Succeeded;
+			}
+		}
+	}
+
+
+	/// Determine all validators that should receive the given dispute requests.
+	///
+	/// This is all parachain validators of the session the candidate occurred in, as well as all
+	/// authorities of all currently active sessions (as determined by the active heads).
+	async fn get_relevant_validators<Context: SubsystemContext>(
+		&self,
+		ctx: &mut Context,
+		runtime: &mut RuntimeInfo,
+		active_sessions: &HashMap<SessionIndex, Hash>,
+	) -> Result<HashSet<AuthorityDiscoveryId>> {
+		let ref_head = self.request.0.candidate_receipt.descriptor.relay_parent;
+		// Parachain validators:
+		let info = runtime
+			.get_session_info_by_index(ctx.sender(), ref_head, self.request.0.session_index)
+			.await?;
+		let session_info = &info.session_info;
+		let validator_count = session_info.validators.len();
+		let mut authorities: HashSet<_> = session_info
+			.discovery_keys
+			.iter()
+			.take(validator_count)
+			.enumerate()
+			.filter(|(i, _)| Some(ValidatorIndex(*i as _)) != info.validator_info.our_index)
+			.map(|(_, v)| v.clone())
+			.collect();
+
+		// Current authorities:
+		for (session_index, head) in active_sessions.iter() {
+			let info = runtime.get_session_info_by_index(ctx.sender(), *head, *session_index).await?;
+			let session_info = &info.session_info;
+			let new_set = session_info
+				.discovery_keys
+				.iter()
+				.enumerate()
+				.filter(|(i, _)| Some(ValidatorIndex(*i as _)) != info.validator_info.our_index)
+				.map(|(_, v)| v.clone());
+			authorities.extend(new_set);
+		}
+		Ok(authorities)
+	}
+}
+
+
+/// Start sending the given request to all given authorities, spawning tasks that handle the
+/// responses.
+async fn send_requests<Context: SubsystemContext>(
+	ctx: &mut Context,
+	tx: mpsc::Sender<TaskFinish>,
+	receivers: Vec<AuthorityDiscoveryId>,
+	req: DisputeRequest,
+) -> Result<HashMap<AuthorityDiscoveryId, DeliveryStatus>> {
+	let mut statuses = HashMap::with_capacity(receivers.len());
+	let mut reqs = Vec::with_capacity(receivers.len());
+
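+	// For each receiver: prepare the outgoing request and spawn a task that awaits the response
+	// and reports the result back via `tx`: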
+	for receiver in receivers {
+		let (outgoing, pending_response) = OutgoingRequest::new(
+			Recipient::Authority(receiver.clone()),
+			req.clone(),
+		);
+
+		reqs.push(Requests::DisputeSending(outgoing));
+
+		let fut = wait_response_task(
+			pending_response,
+			req.0.candidate_receipt.hash(),
+			receiver.clone(),
+			tx.clone(),
+		);
+
+		let (remote, remote_handle) = fut.remote_handle();
+		ctx.spawn("dispute-sender", remote.boxed())
+			.map_err(Fatal::SpawnTask)?;
+		statuses.insert(receiver, DeliveryStatus::Pending(remote_handle));
+	}
+
+	let msg = NetworkBridgeMessage::SendRequests(
+		reqs,
+		// We should be connected, but the hell - if not, try!
+		IfDisconnected::TryConnect,
+	);
+	ctx.send_message(AllMessages::NetworkBridge(msg)).await;
+	Ok(statuses)
+}
+
+/// Future to be spawned in a task for awaiting a response.
+async fn wait_response_task(
+	pending_response: impl Future<Output = OutgoingResult<DisputeResponse>>,
+	candidate_hash: CandidateHash,
+	receiver: AuthorityDiscoveryId,
+	mut tx: mpsc::Sender<TaskFinish>,
+) {
+	let result = pending_response.await;
+	let msg = match result {
+		Err(err) => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				%candidate_hash,
+				%receiver,
+				%err,
+				"Error sending dispute statements to node."
+			);
+			TaskFinish { candidate_hash, receiver, result: TaskResult::Failed}
+		}
+		Ok(DisputeResponse::Confirmed) => {
+			tracing::trace!(
+				target: LOG_TARGET,
+				%candidate_hash,
+				%receiver,
+				"Sending dispute message succeeded"
+			);
+			TaskFinish { candidate_hash, receiver, result: TaskResult::Succeeded }
+		}
+	};
+	if let Err(err) = tx.feed(msg).await {
+		tracing::debug!(
+			target: LOG_TARGET,
+			%err,
+			"Failed to notify susystem about dispute sending result."
+		);
+	}
+}
diff --git a/polkadot/node/network/dispute-distribution/src/tests/mock.rs b/polkadot/node/network/dispute-distribution/src/tests/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ca057ce49e2a6a5c416a84512faa9b266854ae08
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/tests/mock.rs
@@ -0,0 +1,195 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+//
+
+//! Mock data and utility functions for unit tests in this subsystem.
+
+use std::{collections::HashMap, sync::Arc};
+
+use async_trait::async_trait;
+use lazy_static::lazy_static;
+
+use polkadot_node_network_protocol::{PeerId, authority_discovery::AuthorityDiscovery};
+use sc_keystore::LocalKeystore;
+use sp_application_crypto::AppKey;
+use sp_keyring::{Sr25519Keyring};
+use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
+
+use polkadot_node_primitives::{DisputeMessage, SignedDisputeStatement};
+use polkadot_primitives::v1::{
+	CandidateDescriptor, CandidateHash, CandidateReceipt, Hash,
+	SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, AuthorityDiscoveryId,
+};
+
+pub const MOCK_SESSION_INDEX: SessionIndex = 1;
+pub const MOCK_NEXT_SESSION_INDEX: SessionIndex = 2;
+pub const MOCK_VALIDATORS: [Sr25519Keyring; 6] = [
+	Sr25519Keyring::Ferdie,
+	Sr25519Keyring::Alice,
+	Sr25519Keyring::Bob,
+	Sr25519Keyring::Charlie,
+	Sr25519Keyring::Dave,
+	Sr25519Keyring::Eve,
+];
+
+pub const MOCK_AUTHORITIES_NEXT_SESSION: [Sr25519Keyring;2] = [
+	Sr25519Keyring::One,
+	Sr25519Keyring::Two,
+];
+
+pub const FERDIE_INDEX: ValidatorIndex = ValidatorIndex(0);
+pub const ALICE_INDEX: ValidatorIndex = ValidatorIndex(1);
+
+
+lazy_static! {
+
+/// Mocked AuthorityDiscovery service.
+pub static ref MOCK_AUTHORITY_DISCOVERY: MockAuthorityDiscovery = MockAuthorityDiscovery::new();
+// Creating an innocent-looking `SessionInfo` is really expensive in a debug build - around
+// 700ms on my machine. We therefore cache those keys here:
+pub static ref MOCK_VALIDATORS_DISCOVERY_KEYS: HashMap<Sr25519Keyring, AuthorityDiscoveryId> =
+	MOCK_VALIDATORS
+	.iter()
+	.chain(MOCK_AUTHORITIES_NEXT_SESSION.iter())
+	.map(|v| (v.clone(), v.public().into()))
+	.collect()
+;
+pub static ref FERDIE_DISCOVERY_KEY: AuthorityDiscoveryId =
+	MOCK_VALIDATORS_DISCOVERY_KEYS.get(&Sr25519Keyring::Ferdie).unwrap().clone();
+
+pub static ref MOCK_SESSION_INFO: SessionInfo =
+	SessionInfo {
+		validators: MOCK_VALIDATORS.iter().take(4).map(|k| k.public().into()).collect(),
+		discovery_keys: MOCK_VALIDATORS
+			.iter()
+			.map(|k| MOCK_VALIDATORS_DISCOVERY_KEYS.get(&k).unwrap().clone())
+			.collect(),
+		..Default::default()
+	};
+
+/// `SessionInfo` for the second session (no more validators, but two more authorities).
+pub static ref MOCK_NEXT_SESSION_INFO: SessionInfo =
+	SessionInfo {
+		discovery_keys:
+			MOCK_AUTHORITIES_NEXT_SESSION
+				.iter()
+				.map(|k| MOCK_VALIDATORS_DISCOVERY_KEYS.get(&k).unwrap().clone())
+				.collect(),
+		..Default::default()
+	};
+}
+
+
+pub fn make_candidate_receipt(relay_parent: Hash) -> CandidateReceipt {
+	CandidateReceipt {
+		descriptor: CandidateDescriptor {
+			relay_parent,
+			..Default::default()
+		},
+		commitments_hash: Hash::random(),
+	}
+}
+
+pub async fn make_explicit_signed(
+	validator: Sr25519Keyring,
+	candidate_hash: CandidateHash,
+	valid: bool
+) -> SignedDisputeStatement {
+	let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+	SyncCryptoStore::sr25519_generate_new(
+		&*keystore,
+		ValidatorId::ID,
+		Some(&validator.to_seed()),
+	)
+	.expect("Insert key into keystore");
+
+	SignedDisputeStatement::sign_explicit(
+		&keystore,
+		valid,
+		candidate_hash,
+		MOCK_SESSION_INDEX,
+		validator.public().into(),
+	)
+	.await
+	.expect("Keystore should be fine.")
+	.expect("Signing should work.")
+}
+
+
+pub async fn make_dispute_message(
+	candidate: CandidateReceipt,
+	valid_validator: ValidatorIndex,
+	invalid_validator: ValidatorIndex,
+) -> DisputeMessage {
+	let candidate_hash = candidate.hash();
+	let valid_vote = 
+		make_explicit_signed(MOCK_VALIDATORS[valid_validator.0 as usize], candidate_hash, true).await;
+	let invalid_vote =
+		make_explicit_signed(MOCK_VALIDATORS[invalid_validator.0 as usize], candidate_hash, false).await;
+	DisputeMessage::from_signed_statements(
+		valid_vote,
+		valid_validator,
+		invalid_vote,
+		invalid_validator,
+		candidate,
+		&MOCK_SESSION_INFO,
+	)
+	.expect("DisputeMessage construction should work.")
+}
+
+/// Dummy `AuthorityDiscovery` service.
+#[derive(Debug, Clone)]
+pub struct MockAuthorityDiscovery {
+	peer_ids: HashMap<Sr25519Keyring, PeerId>
+}
+
+impl MockAuthorityDiscovery {
+	pub fn new() -> Self {
+		let mut peer_ids = HashMap::new();
+		peer_ids.insert(Sr25519Keyring::Alice, PeerId::random());
+		peer_ids.insert(Sr25519Keyring::Bob, PeerId::random());
+		peer_ids.insert(Sr25519Keyring::Ferdie, PeerId::random());
+		peer_ids.insert(Sr25519Keyring::Charlie, PeerId::random());
+		peer_ids.insert(Sr25519Keyring::Dave, PeerId::random());
+		peer_ids.insert(Sr25519Keyring::Eve, PeerId::random());
+		peer_ids.insert(Sr25519Keyring::One, PeerId::random());
+		peer_ids.insert(Sr25519Keyring::Two, PeerId::random());
+
+		Self { peer_ids }
+	}
+
+	pub fn get_peer_id_by_authority(&self, authority: Sr25519Keyring) -> PeerId {
+		*self.peer_ids.get(&authority).expect("Tester only picks valid authorities")
+	}
+}
+
+#[async_trait]
+impl AuthorityDiscovery for MockAuthorityDiscovery {
+	async fn get_addresses_by_authority_id(&mut self, _authority: polkadot_primitives::v1::AuthorityDiscoveryId)
+		-> Option<Vec<sc_network::Multiaddr>> {
+		panic!("Not implemented");
+	}
+
+	async fn get_authority_id_by_peer_id(&mut self, peer_id: polkadot_node_network_protocol::PeerId)
+		-> Option<polkadot_primitives::v1::AuthorityDiscoveryId> {
+		for (a, p) in self.peer_ids.iter() {
+			if p == &peer_id {
+				return Some(MOCK_VALIDATORS_DISCOVERY_KEYS.get(&a).unwrap().clone())
+			}
+		}
+		None
+	}
+}
diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ca535459761e5dde6d784294e0fb0d68a2cbc1e7
--- /dev/null
+++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs
@@ -0,0 +1,765 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+//
+
+//! Subsystem unit tests
+
+use std::collections::HashSet;
+use std::sync::Arc;
+use std::task::Poll;
+use std::time::Duration;
+
+use assert_matches::assert_matches;
+use futures::{
+	channel::{oneshot, mpsc},
+	future::poll_fn,
+	pin_mut,
+	SinkExt, Future
+};
+use futures_timer::Delay;
+use parity_scale_codec::{Encode, Decode};
+
+use polkadot_node_network_protocol::PeerId;
+use polkadot_node_network_protocol::request_response::v1::DisputeRequest;
+use sp_keyring::Sr25519Keyring;
+
+use polkadot_node_network_protocol::{IfDisconnected, request_response::{Recipient, Requests, v1::DisputeResponse}};
+use polkadot_node_primitives::{CandidateVotes, UncheckedDisputeMessage};
+use polkadot_primitives::v1::{AuthorityDiscoveryId, CandidateHash, Hash, SessionIndex, SessionInfo};
+use polkadot_subsystem::messages::{DisputeCoordinatorMessage, ImportStatementsResult};
+use polkadot_subsystem::{
+	ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, LeafStatus, OverseerSignal, Span,
+	messages::{
+		AllMessages, DisputeDistributionMessage, NetworkBridgeMessage, RuntimeApiMessage, RuntimeApiRequest
+	},
+};
+use polkadot_subsystem_testhelpers::{TestSubsystemContextHandle, mock::make_ferdie_keystore, subsystem_test_harness};
+
+use crate::{DisputeDistributionSubsystem, LOG_TARGET, Metrics};
+use self::mock::{
+	ALICE_INDEX, FERDIE_INDEX, make_candidate_receipt, make_dispute_message,
+	MOCK_AUTHORITY_DISCOVERY, MOCK_SESSION_INDEX, MOCK_SESSION_INFO, MOCK_NEXT_SESSION_INDEX,
+	MOCK_NEXT_SESSION_INFO, FERDIE_DISCOVERY_KEY,
+};
+
+/// Useful mock providers.
+pub mod mock;
+
+#[test]
+fn send_dispute_sends_dispute() {
+	let test = |mut handle: TestSubsystemContextHandle<DisputeDistributionMessage>|
+		async move {
+
+			let (_, _) = handle_subsystem_startup(&mut handle, None).await;
+
+			let relay_parent = Hash::random();
+			let candidate = make_candidate_receipt(relay_parent);
+			let message =
+				make_dispute_message(candidate.clone(), ALICE_INDEX, FERDIE_INDEX,).await;
+			handle.send(
+				FromOverseer::Communication {
+					msg: DisputeDistributionMessage::SendDispute(message.clone())
+				}
+			).await;
+			// Requests needed session info:
+			assert_matches!(
+				handle.recv().await,
+				AllMessages::RuntimeApi(
+					RuntimeApiMessage::Request(
+						hash,
+						RuntimeApiRequest::SessionInfo(session_index, tx)
+					)
+				) => {
+					assert_eq!(session_index, MOCK_SESSION_INDEX);
+					assert_eq!(
+						hash,
+						message.candidate_receipt().descriptor.relay_parent
+					);
+					tx.send(Ok(Some(MOCK_SESSION_INFO.clone()))).expect("Receiver should stay alive.");
+				}
+			);
+
+			let expected_receivers = {
+				let info = &MOCK_SESSION_INFO;
+				info.discovery_keys
+					.clone()
+					.into_iter()
+					.filter(|a| a != &Sr25519Keyring::Ferdie.public().into())
+					.collect()
+				// All validators are also authorities in the first session, so we are
+				// done here.
+			};
+			check_sent_requests(&mut handle, expected_receivers, true).await;
+
+			conclude(&mut handle).await;
+	};
+	test_harness(test);
+}
+
+#[test]
+fn received_request_triggers_import() {
+	let test = |mut handle: TestSubsystemContextHandle<DisputeDistributionMessage>|
+		async move {
+			let (_, mut req_tx) = handle_subsystem_startup(&mut handle, None).await;
+
+			let relay_parent = Hash::random();
+			let candidate = make_candidate_receipt(relay_parent);
+			let message =
+				make_dispute_message(candidate.clone(), ALICE_INDEX, FERDIE_INDEX,).await;
+
+			// Non validator request should get dropped:
+			let rx_response = send_network_dispute_request(
+				&mut req_tx,
+				PeerId::random(),
+				message.clone().into()
+			).await;
+
+			assert_matches!(
+				rx_response.await,
+				Ok(resp) => {
+					let sc_network::config::OutgoingResponse {
+						result: _,
+						reputation_changes,
+						sent_feedback: _,
+					} = resp;
+					// Peer should get punished:
+					assert_eq!(reputation_changes.len(), 1);
+				}
+			);
+
+			// Nested valid and invalid import.
+			//
+			// Nested requests from the same peer should get dropped. For the invalid request,
+			// even subsequent requests should get dropped.
+			nested_network_dispute_request(
+				&mut handle,
+				&mut req_tx,
+				MOCK_AUTHORITY_DISCOVERY.get_peer_id_by_authority(Sr25519Keyring::Alice),
+				message.clone().into(),
+				ImportStatementsResult::InvalidImport,
+				true,
+				move |handle, req_tx, message| 
+					nested_network_dispute_request(
+						handle, 
+						req_tx,
+						MOCK_AUTHORITY_DISCOVERY.get_peer_id_by_authority(Sr25519Keyring::Bob),
+						message.clone().into(),
+						ImportStatementsResult::ValidImport,
+						false,
+						move |_, req_tx, message| async move {
+							// Another request from Alice should get dropped (request already in
+							// flight):
+							{
+								let rx_response = send_network_dispute_request(
+									req_tx,
+									MOCK_AUTHORITY_DISCOVERY.get_peer_id_by_authority(Sr25519Keyring::Alice),
+									message.clone(),
+								).await;
+
+								assert_matches!(
+									rx_response.await,
+									Err(err) => {
+										tracing::trace!(
+											target: LOG_TARGET,
+											?err,
+											"Request got dropped - other request already in flight"
+										);
+									}
+								);
+							}
+							// Another request from Bob should get dropped (request already in
+							// flight):
+							{
+								let rx_response = send_network_dispute_request(
+									req_tx,
+									MOCK_AUTHORITY_DISCOVERY.get_peer_id_by_authority(Sr25519Keyring::Bob),
+									message.clone(),
+								).await;
+
+								assert_matches!(
+									rx_response.await,
+									Err(err) => {
+										tracing::trace!(
+											target: LOG_TARGET,
+											?err,
+											"Request got dropped - other request already in flight"
+										);
+									}
+								);
+							}
+						}
+					)
+			).await;
+
+			// Subsequent sends from Alice should fail (peer is banned):
+			{
+				let rx_response = send_network_dispute_request(
+					&mut req_tx,
+					MOCK_AUTHORITY_DISCOVERY.get_peer_id_by_authority(Sr25519Keyring::Alice),
+					message.clone().into()
+				).await;
+
+				assert_matches!(
+					rx_response.await,
+					Err(err) => {
+						tracing::trace!(
+							target: LOG_TARGET,
+							?err,
+							"Request got dropped - peer is banned."
+						);
+					}
+				);
+			}
+
+			// But should work fine for Bob:
+			nested_network_dispute_request(
+				&mut handle, 
+				&mut req_tx, 
+				MOCK_AUTHORITY_DISCOVERY.get_peer_id_by_authority(Sr25519Keyring::Bob),
+				message.clone().into(),
+				ImportStatementsResult::ValidImport,
+				false,
+				|_, _, _| async {}
+			).await;
+
+			tracing::trace!(target: LOG_TARGET, "Concluding.");
+			conclude(&mut handle).await;
+	};
+	test_harness(test);
+}
+
+#[test]
+fn disputes_are_recovered_at_startup() {
+	let test = |mut handle: TestSubsystemContextHandle<DisputeDistributionMessage>|
+		async move {
+
+			let relay_parent = Hash::random();
+			let candidate = make_candidate_receipt(relay_parent);
+
+			let (_, _) = handle_subsystem_startup(&mut handle, Some(candidate.hash())).await;
+
+			let message =
+				make_dispute_message(candidate.clone(), ALICE_INDEX, FERDIE_INDEX,).await;
+			// Requests needed session info:
+			assert_matches!(
+				handle.recv().await,
+				AllMessages::DisputeCoordinator(
+					DisputeCoordinatorMessage::QueryCandidateVotes(
+						session_index,
+						candidate_hash,
+						tx,
+					)
+				) => {
+					assert_eq!(session_index, MOCK_SESSION_INDEX);
+					assert_eq!(candidate_hash, candidate.hash());
+					let unchecked: UncheckedDisputeMessage = message.into();
+					tx.send(Some(CandidateVotes {
+						candidate_receipt: candidate,
+						valid: vec![(
+							unchecked.valid_vote.kind,
+							unchecked.valid_vote.validator_index,
+							unchecked.valid_vote.signature
+						)],
+						invalid: vec![(
+							unchecked.invalid_vote.kind,
+							unchecked.invalid_vote.validator_index,
+							unchecked.invalid_vote.signature
+						)],
+					}))
+					.expect("Receiver should stay alive.");
+				}
+			);
+
+			let expected_receivers = {
+				let info = &MOCK_SESSION_INFO;
+				info.discovery_keys
+					.clone()
+					.into_iter()
+					.filter(|a| a != &Sr25519Keyring::Ferdie.public().into())
+					.collect()
+				// All validators are also authorities in the first session, so we are
+				// done here.
+			};
+			check_sent_requests(&mut handle, expected_receivers, true).await;
+
+			conclude(&mut handle).await;
+	};
+	test_harness(test);
+}
+
+#[test]
+fn send_dispute_gets_cleaned_up() {
+	let test = |mut handle: TestSubsystemContextHandle<DisputeDistributionMessage>|
+		async move {
+
+			let (old_head, _) = handle_subsystem_startup(&mut handle, None).await;
+
+			let relay_parent = Hash::random();
+			let candidate = make_candidate_receipt(relay_parent);
+			let message =
+				make_dispute_message(candidate.clone(), ALICE_INDEX, FERDIE_INDEX,).await;
+			handle.send(
+				FromOverseer::Communication {
+					msg: DisputeDistributionMessage::SendDispute(message.clone())
+				}
+			).await;
+			// Requests needed session info:
+			assert_matches!(
+				handle.recv().await,
+				AllMessages::RuntimeApi(
+					RuntimeApiMessage::Request(
+						hash,
+						RuntimeApiRequest::SessionInfo(session_index, tx)
+					)
+				) => {
+					assert_eq!(session_index, MOCK_SESSION_INDEX);
+					assert_eq!(
+						hash,
+						message.candidate_receipt().descriptor.relay_parent
+					);
+					tx.send(Ok(Some(MOCK_SESSION_INFO.clone()))).expect("Receiver should stay alive.");
+				}
+			);
+
+			let expected_receivers = {
+				let info = &MOCK_SESSION_INFO;
+				info.discovery_keys
+					.clone()
+					.into_iter()
+					.filter(|a| a != &Sr25519Keyring::Ferdie.public().into())
+					.collect()
+				// All validators are also authorities in the first session, so we are
+				// done here.
+			};
+			check_sent_requests(&mut handle, expected_receivers, false).await;
+
+			// Give tasks a chance to finish:
+			Delay::new(Duration::from_millis(20)).await;
+
+			activate_leaf(
+				&mut handle,
+				Hash::random(),
+				Some(old_head),
+				MOCK_SESSION_INDEX,
+				None,
+				// No disputes any more:
+				Vec::new(),
+			).await;
+
+			// Yield, so subsystem can make progress:
+			Delay::new(Duration::from_millis(2)).await;
+
+			conclude(&mut handle).await;
+	};
+	test_harness(test);
+}
+
+#[test]
+fn dispute_retries_and_works_across_session_boundaries() {
+	let test = |mut handle: TestSubsystemContextHandle<DisputeDistributionMessage>|
+		async move {
+
+			let (old_head, _) = handle_subsystem_startup(&mut handle, None).await;
+
+			let relay_parent = Hash::random();
+			let candidate = make_candidate_receipt(relay_parent);
+			let message =
+				make_dispute_message(candidate.clone(), ALICE_INDEX, FERDIE_INDEX,).await;
+			handle.send(
+				FromOverseer::Communication {
+					msg: DisputeDistributionMessage::SendDispute(message.clone())
+				}
+			).await;
+			// Requests needed session info:
+			assert_matches!(
+				handle.recv().await,
+				AllMessages::RuntimeApi(
+					RuntimeApiMessage::Request(
+						hash,
+						RuntimeApiRequest::SessionInfo(session_index, tx)
+					)
+				) => {
+					assert_eq!(session_index, MOCK_SESSION_INDEX);
+					assert_eq!(
+						hash,
+						message.candidate_receipt().descriptor.relay_parent
+					);
+					tx.send(Ok(Some(MOCK_SESSION_INFO.clone()))).expect("Receiver should stay alive.");
+				}
+			);
+
+			let expected_receivers: HashSet<_> = {
+				let info = &MOCK_SESSION_INFO;
+				info.discovery_keys
+					.clone()
+					.into_iter()
+					.filter(|a| a != &Sr25519Keyring::Ferdie.public().into())
+					.collect()
+				// All validators are also authorities in the first session, so we are
+				// done here.
+			};
+			// Requests don't get confirmed - dispute is carried over to next session.
+			check_sent_requests(&mut handle, expected_receivers.clone(), false).await;
+
+			// Give tasks a chance to finish:
+			Delay::new(Duration::from_millis(20)).await;
+
+			// Trigger retry:
+			let old_head2 = Hash::random();
+			activate_leaf(
+				&mut handle,
+				old_head2,
+				Some(old_head),
+				MOCK_SESSION_INDEX,
+				None,
+				vec![(MOCK_SESSION_INDEX, candidate.hash())]
+			).await;
+
+			check_sent_requests(&mut handle, expected_receivers.clone(), false).await;
+			// Give tasks a chance to finish:
+			Delay::new(Duration::from_millis(20)).await;
+
+			// Session change:
+			activate_leaf(
+				&mut handle,
+				Hash::random(),
+				Some(old_head2),
+				MOCK_NEXT_SESSION_INDEX,
+				Some(MOCK_NEXT_SESSION_INFO.clone()),
+				vec![(MOCK_SESSION_INDEX, candidate.hash())]
+			).await;
+
+			let expected_receivers = {
+				let validator_count = MOCK_SESSION_INFO.validators.len();
+				let old_validators = MOCK_SESSION_INFO
+					.discovery_keys
+					.clone()
+					.into_iter()
+					.take(validator_count)
+					.filter(|a| *a != *FERDIE_DISCOVERY_KEY);
+
+				MOCK_NEXT_SESSION_INFO
+					.discovery_keys
+					.clone()
+					.into_iter()
+					.filter(|a| *a != *FERDIE_DISCOVERY_KEY)
+					.chain(old_validators)
+					.collect()
+			};
+			check_sent_requests(&mut handle, expected_receivers, true).await;
+
+			conclude(&mut handle).await;
+	};
+	test_harness(test);
+}
+
+async fn send_network_dispute_request(
+	req_tx: &mut mpsc::Sender<sc_network::config::IncomingRequest>,
+	peer: PeerId,
+	message: DisputeRequest,
+) -> oneshot::Receiver<sc_network::config::OutgoingResponse> {
+	let (pending_response, rx_response) = oneshot::channel();
+	let req = sc_network::config::IncomingRequest {
+		peer, 
+		payload: message.encode(),
+		pending_response,
+	};
+	req_tx.feed(req).await.unwrap();
+	rx_response
+}
+
+/// Send request and handle its reactions.
+///
+/// The passed-in function will be called while votes are still being imported.
+async fn nested_network_dispute_request<'a, F, O>(
+	handle: &'a mut TestSubsystemContextHandle<DisputeDistributionMessage>,
+	req_tx: &'a mut mpsc::Sender<sc_network::config::IncomingRequest>,
+	peer: PeerId,
+	message: DisputeRequest,
+	import_result: ImportStatementsResult,
+	need_session_info: bool,
+	inner: F,
+) 
+	where
+		F: FnOnce(
+				&'a mut TestSubsystemContextHandle<DisputeDistributionMessage>,
+				&'a mut mpsc::Sender<sc_network::config::IncomingRequest>,
+				DisputeRequest,
+			) -> O + 'a,
+		O: Future<Output = ()> + 'a
+{
+	let rx_response = send_network_dispute_request(
+		req_tx,
+		peer,
+		message.clone().into()
+	).await;
+
+	if need_session_info {
+		// Subsystem might need `SessionInfo` for determining indices:
+		match handle.recv().await {
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+					_,
+					RuntimeApiRequest::SessionInfo(_, tx)
+			)) => {
+				tx.send(Ok(Some(MOCK_SESSION_INFO.clone()))).expect("Receiver should stay alive.");
+			}
+			unexpected => panic!("Unexpected message {:?}", unexpected),
+		}
+	}
+
+	// Import should get initiated:
+	let pending_confirmation = assert_matches!(
+		handle.recv().await,
+		AllMessages::DisputeCoordinator(
+			DisputeCoordinatorMessage::ImportStatements {
+				candidate_hash,
+				candidate_receipt,
+				session,
+				statements,
+				pending_confirmation,
+			}
+		) => {
+			assert_eq!(session, MOCK_SESSION_INDEX);
+			assert_eq!(candidate_hash, message.0.candidate_receipt.hash());
+			assert_eq!(candidate_hash, candidate_receipt.hash());
+			assert_eq!(statements.len(), 2);
+			pending_confirmation
+		}
+	);
+	
+	// Do the inner thing:
+	inner(handle, req_tx, message).await;
+
+	// Confirm import
+	pending_confirmation.send(import_result).unwrap();
+
+	assert_matches!(
+		rx_response.await,
+		Ok(resp) => {
+			let sc_network::config::OutgoingResponse {
+				result,
+				reputation_changes,
+				sent_feedback,
+			} = resp;
+
+			match import_result {
+				ImportStatementsResult::ValidImport => {
+					let result = result.unwrap();
+					let decoded = 
+						<DisputeResponse as Decode>::decode(&mut result.as_slice()).unwrap();
+
+					assert!(decoded == DisputeResponse::Confirmed);
+					if let Some(sent_feedback) = sent_feedback {
+						sent_feedback.send(()).unwrap();
+					}
+					tracing::trace!(
+						target: LOG_TARGET,
+						"Valid import happened."
+					);
+
+				}
+				ImportStatementsResult::InvalidImport => {
+					// Peer should get punished:
+					assert_eq!(reputation_changes.len(), 1);
+				}
+			}
+		}
+	);
+}
+
+async fn conclude(
+	handle: &mut TestSubsystemContextHandle<DisputeDistributionMessage>,
+) {
+	// No more messages should be in the queue:
+	poll_fn(|ctx| {
+		let fut = handle.recv();
+		pin_mut!(fut);
+		// No requests should be initiated, as there is no longer any dispute active:
+		assert_matches!(
+			fut.poll(ctx),
+			Poll::Pending,
+			"No requests expected"
+		);
+		Poll::Ready(())
+	}).await;
+
+	handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
+}
+
+/// Pass a `new_session` if you expect the subsystem to retrieve `SessionInfo` when given the
+/// `session_index`.
+async fn activate_leaf(
+	handle: &mut TestSubsystemContextHandle<DisputeDistributionMessage>,
+	activate: Hash,
+	deactivate: Option<Hash>,
+	session_index: SessionIndex,
+	// New session if we expect the subsystem to request it.
+	new_session: Option<SessionInfo>,
+	// Currently active disputes to send to the subsystem.
+	active_disputes: Vec<(SessionIndex, CandidateHash)>,
+) {
+	let has_active_disputes = !active_disputes.is_empty();
+	handle.send(FromOverseer::Signal(
+		OverseerSignal::ActiveLeaves(
+			ActiveLeavesUpdate {
+				activated: [ActivatedLeaf {
+					hash: activate,
+					number: 10,
+					status: LeafStatus::Fresh,
+					span: Arc::new(Span::Disabled),
+				}][..].into(),
+				deactivated: deactivate.into_iter().collect(),
+			}
+	)))
+	.await;
+	assert_matches!(
+		handle.recv().await,
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+			h,
+			RuntimeApiRequest::SessionIndexForChild(tx)
+		)) => {
+			assert_eq!(h, activate);
+			tx.send(Ok(session_index)).expect("Receiver should stay alive.");
+		}
+	);
+	assert_matches!(
+		handle.recv().await,
+		AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::ActiveDisputes(tx)) => {
+			tx.send(active_disputes).expect("Receiver should stay alive.");
+		}
+	);
+
+	let new_session = match (new_session, has_active_disputes) {
+		(Some(new_session), true) => new_session,
+		_ => return,
+	};
+
+	assert_matches!(
+		handle.recv().await,
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				h,
+				RuntimeApiRequest::SessionInfo(i, tx)
+		)) => {
+			assert_eq!(h, activate);
+			assert_eq!(i, session_index);
+			tx.send(Ok(Some(new_session))).expect("Receiver should stay alive.");
+		}
+	);
+}
+
+/// Check whether sent network bridge requests match the expectation.
+async fn check_sent_requests(
+	handle: &mut TestSubsystemContextHandle<DisputeDistributionMessage>,
+	expected_receivers: HashSet<AuthorityDiscoveryId>,
+	confirm_receive: bool,
+) {
+	let expected_receivers: HashSet<_> =
+		expected_receivers
+			.into_iter()
+			.map(Recipient::Authority)
+			.collect();
+
+	// Sends to concerned validators:
+	assert_matches!(
+		handle.recv().await,
+		AllMessages::NetworkBridge(
+			NetworkBridgeMessage::SendRequests(reqs, IfDisconnected::TryConnect)
+		) => {
+			let reqs: Vec<_> = reqs.into_iter().map(|r|
+				assert_matches!(
+					r,
+					Requests::DisputeSending(req) => {req}
+				)
+			)
+			.collect();
+
+			let receivers_raw: Vec<_> = reqs.iter().map(|r| r.peer.clone()).collect();
+			let receivers: HashSet<_> = receivers_raw.clone().into_iter().collect();
+			assert_eq!(receivers_raw.len(), receivers.len(), "No duplicates are expected.");
+			assert_eq!(receivers.len(), expected_receivers.len());
+			assert_eq!(receivers, expected_receivers);
+			if confirm_receive {
+				for req in reqs {
+					req.pending_response.send(
+						Ok(DisputeResponse::Confirmed.encode())
+					)
+					.expect("Subsystem should be listening for a response.");
+				}
+			}
+		}
+	);
+}
+
+/// Initialize subsystem and return request sender needed for sending incoming requests to the
+/// subsystem.
+async fn handle_subsystem_startup(
+	handle: &mut TestSubsystemContextHandle<DisputeDistributionMessage>,
+	ongoing_dispute: Option<CandidateHash>,
+) -> (Hash, mpsc::Sender<sc_network::config::IncomingRequest>) {
+	let (request_tx, request_rx) = mpsc::channel(5);
+	handle.send(
+		FromOverseer::Communication {
+			msg: DisputeDistributionMessage::DisputeSendingReceiver(request_rx),
+		}
+	).await;
+
+	let relay_parent = Hash::random();
+	activate_leaf(
+		handle,
+		relay_parent,
+		None,
+		MOCK_SESSION_INDEX,
+		Some(MOCK_SESSION_INFO.clone()),
+		ongoing_dispute.into_iter().map(|c| (MOCK_SESSION_INDEX, c)).collect()
+	).await;
+	(relay_parent, request_tx)
+}
+
+
+/// Launch the subsystem and the provided test function, which simulates the overseer.
+fn test_harness<TestFn, Fut>(test: TestFn)
+where
+	TestFn: FnOnce(TestSubsystemContextHandle<DisputeDistributionMessage>) -> Fut,
+	Fut: Future<Output = ()>
+{
+	sp_tracing::try_init_simple();
+	let keystore = make_ferdie_keystore();
+
+	let subsystem = DisputeDistributionSubsystem::new(
+		keystore,
+		MOCK_AUTHORITY_DISCOVERY.clone(),
+		Metrics::new_dummy()
+	);
+
+	let subsystem = |ctx| async {
+		match subsystem.run(ctx).await {
+			Ok(()) => {},
+			Err(fatal) => {
+				tracing::debug!(
+					target: LOG_TARGET,
+					?fatal,
+					"Dispute distribution exited with fatal error."
+				);
+			}
+		}
+	};
+	subsystem_test_harness(test, subsystem);
+}
+
diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml
index acce9e2229688e2158b1ea5bc99f52f91c553ba2..aebeb0b18a1041df70523ab18d0ff97a0716dc51 100644
--- a/polkadot/node/network/gossip-support/Cargo.toml
+++ b/polkadot/node/network/gossip-support/Cargo.toml
@@ -22,7 +22,6 @@ tracing = "0.1.26"
 [dev-dependencies]
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
 sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
 
 polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs
index 8d80d84d9b0b9e3464d464f7e112603e71c8deae..a37b8ac04e49a945e415743f98b7872284618063 100644
--- a/polkadot/node/network/gossip-support/src/tests.rs
+++ b/polkadot/node/network/gossip-support/src/tests.rs
@@ -23,12 +23,11 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt as _;
-use sc_keystore::LocalKeystore;
 use sp_keyring::Sr25519Keyring;
-use sp_keystore::SyncCryptoStore;
 use sp_consensus_babe::{
 	Epoch as BabeEpoch, BabeEpochConfiguration, AllowedSlots,
 };
+use test_helpers::mock::make_ferdie_keystore;
 
 use std::sync::Arc;
 use std::time::Duration;
@@ -98,17 +97,6 @@ async fn overseer_recv(
 	msg
 }
 
-fn make_ferdie_keystore() -> SyncCryptoStorePtr {
-	let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
-	SyncCryptoStore::sr25519_generate_new(
-		&*keystore,
-		AuthorityDiscoveryId::ID,
-		Some(&Sr25519Keyring::Ferdie.to_seed()),
-	)
-	.expect("Insert key into keystore");
-	keystore
-}
-
 fn authorities() -> Vec<AuthorityDiscoveryId> {
 	vec![
 		Sr25519Keyring::Alice.public().into(),
diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml
index 2e83e01fa04523df237e4d398000fae25178c58c..e549acf39cf696004e5af90c7b8d3d969f2376db 100644
--- a/polkadot/node/network/protocol/Cargo.toml
+++ b/polkadot/node/network/protocol/Cargo.toml
@@ -6,11 +6,13 @@ edition = "2018"
 description = "Primitives types for the Node-side"
 
 [dependencies]
+async-trait = "0.1.42"
 polkadot-primitives = { path = "../../../primitives" }
 polkadot-node-primitives = { path = "../../primitives" }
 polkadot-node-jaeger = { path = "../../jaeger" }
 parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
 sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" }
 strum = { version = "0.20", features = ["derive"] }
 futures = "0.3.15"
 thiserror = "1.0.23"
diff --git a/polkadot/node/network/protocol/src/authority_discovery.rs b/polkadot/node/network/protocol/src/authority_discovery.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c9946f5c68d98af0285322aed5b9f9b31f69fd07
--- /dev/null
+++ b/polkadot/node/network/protocol/src/authority_discovery.rs
@@ -0,0 +1,48 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Authority discovery service interfacing.
+
+use std::fmt::Debug;
+
+use async_trait::async_trait;
+
+use sc_authority_discovery::Service as AuthorityDiscoveryService;
+
+use polkadot_primitives::v1::AuthorityDiscoveryId;
+use sc_network::{Multiaddr, PeerId};
+
+/// An abstraction over the authority discovery service.
+///
+/// Mostly needed for being able to mock it in tests.
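+///
+/// A minimal mock sketch (hypothetical, for illustration only; the dispute-distribution
+/// tests in this PR use a richer `MockAuthorityDiscovery`):
+///
+/// ```ignore
+/// #[derive(Debug, Clone)]
+/// struct NoopAuthorityDiscovery;
+///
+/// #[async_trait]
+/// impl AuthorityDiscovery for NoopAuthorityDiscovery {
+/// 	async fn get_addresses_by_authority_id(&mut self, _: AuthorityDiscoveryId) -> Option<Vec<Multiaddr>> {
+/// 		None
+/// 	}
+/// 	async fn get_authority_id_by_peer_id(&mut self, _: PeerId) -> Option<AuthorityDiscoveryId> {
+/// 		None
+/// 	}
+/// }
+/// ```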
+#[async_trait]
+pub trait AuthorityDiscovery: Send + Debug + 'static {
+	/// Get the addresses for the given [`AuthorityDiscoveryId`] from the local address cache.
+	async fn get_addresses_by_authority_id(&mut self, authority: AuthorityDiscoveryId) -> Option<Vec<Multiaddr>>;
+	/// Get the [`AuthorityDiscoveryId`] for the given [`PeerId`] from the local address cache.
+	async fn get_authority_id_by_peer_id(&mut self, peer_id: PeerId) -> Option<AuthorityDiscoveryId>;
+}
+
+#[async_trait]
+impl AuthorityDiscovery for AuthorityDiscoveryService {
+	async fn get_addresses_by_authority_id(&mut self, authority: AuthorityDiscoveryId) -> Option<Vec<Multiaddr>> {
+		AuthorityDiscoveryService::get_addresses_by_authority_id(self, authority).await
+	}
+
+	async fn get_authority_id_by_peer_id(&mut self, peer_id: PeerId) -> Option<AuthorityDiscoveryId> {
+		AuthorityDiscoveryService::get_authority_id_by_peer_id(self, peer_id).await
+	}
+}
diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs
index 058c2dbab14fc2f3f5f77dfc460f844da21c48c2..8ce017f8bd5c8577ae657e7913d75b5f08775b96 100644
--- a/polkadot/node/network/protocol/src/lib.rs
+++ b/polkadot/node/network/protocol/src/lib.rs
@@ -38,6 +38,9 @@ pub mod peer_set;
 /// Request/response protocols used in Polkadot.
 pub mod request_response;
 
+/// Accessing the authority discovery service.
+pub mod authority_discovery;
+
 /// A version of the protocol.
 pub type ProtocolVersion = u32;
 /// The minimum amount of peers to send gossip messages to.
diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs
index 2ad578891032a14fc3c4cf362aa85c2181ae54b3..c264c5d2ee0c79993c709f704871fd3b0f620003 100644
--- a/polkadot/node/network/protocol/src/request_response/mod.rs
+++ b/polkadot/node/network/protocol/src/request_response/mod.rs
@@ -66,6 +66,8 @@ pub enum Protocol {
 	AvailableDataFetching,
 	/// Fetching of statements that are too large for gossip.
 	StatementFetching,
+	/// Sending of dispute statements with application level confirmations.
+	DisputeSending,
 }
 
 
@@ -98,7 +100,7 @@ const STATEMENTS_TIMEOUT: Duration = Duration::from_secs(1);
 /// We don't want a slow peer to slow down all the others, at the same time we want to get out the
 /// data quickly in full to at least some peers (as this will reduce load on us as they then can
 /// start serving the data). So this value is a tradeoff. 3 seems to be sensible. So we would need
-/// to have 3 slow noded connected, to delay transfer for others by `STATEMENTS_TIMEOUT`.
+/// to have 3 slow nodes connected, to delay transfer for others by `STATEMENTS_TIMEOUT`.
 pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3;
 
 impl Protocol {
@@ -106,9 +108,6 @@ impl Protocol {
 	///
 	/// Returns a receiver for messages received on this protocol and the requested
 	/// `ProtocolConfig`.
-	///
-	/// See also `dispatcher::RequestDispatcher`,  which makes use of this function and provides a more
-	/// high-level interface.
 	pub fn get_config(
 		self,
 	) -> (
@@ -167,6 +166,17 @@ impl Protocol {
 				request_timeout: Duration::from_secs(1),
 				inbound_queue: Some(tx),
 			},
+			Protocol::DisputeSending => RequestResponseConfig {
+				name: p_name,
+				max_request_size: 1_000,
+				// Responses are just a confirmation, in essence not even a bit. So 100 seems
+				// plenty.
+				max_response_size: 100,
+				// We can have relatively large timeouts here; there is no value in hitting a
+				// timeout, as we want to get statements through to each node in any case.
+				request_timeout: Duration::from_secs(12),
+				inbound_queue: Some(tx),
+			},
 		};
 		(rx, cfg)
 	}
@@ -195,7 +205,7 @@ impl Protocol {
 				// This is just a guess/estimate, with the following considerations: If we are
 				// faster than that, queue size will stay low anyway, even if not - requesters will
 				// get an immediate error, but if we are slower, requesters will run in a timeout -
-				// waisting precious time.
+				// wasting precious time.
 				let available_bandwidth = 7 * MIN_BANDWIDTH_BYTES / 10;
 				let size = u64::saturating_sub(
 					STATEMENTS_TIMEOUT.as_millis() as u64 * available_bandwidth / (1000 * MAX_CODE_SIZE as u64),
@@ -207,6 +217,10 @@ impl Protocol {
 				);
 				size as usize
 			}
+			// Incoming requests can get bursty; we should also be able to handle them fast on
+			// average, so something in the ballpark of 100 should be fine. Nodes will retry on
+			// failure, so having a good value here is mostly about performance tuning.
+			Protocol::DisputeSending => 100,
 		}
 	}
 
@@ -223,6 +237,7 @@ impl Protocol {
 			Protocol::PoVFetching => "/polkadot/req_pov/1",
 			Protocol::AvailableDataFetching => "/polkadot/req_available_data/1",
 			Protocol::StatementFetching => "/polkadot/req_statement/1",
+			Protocol::DisputeSending => "/polkadot/send_dispute/1",
 		}
 	}
 }
diff --git a/polkadot/node/network/protocol/src/request_response/request.rs b/polkadot/node/network/protocol/src/request_response/request.rs
index 4e6456627bd5c825b527805ba9e24621dbace4de..1d26ddf1f429c2ede499e11f15a3a5bdb85cb6e3 100644
--- a/polkadot/node/network/protocol/src/request_response/request.rs
+++ b/polkadot/node/network/protocol/src/request_response/request.rs
@@ -14,6 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
+use std::marker::PhantomData;
+
 use futures::channel::oneshot;
 use futures::prelude::Future;
 
@@ -54,6 +56,8 @@ pub enum Requests {
 	AvailableDataFetching(OutgoingRequest<v1::AvailableDataFetchingRequest>),
 	/// Requests for fetching large statements as part of statement distribution.
 	StatementFetching(OutgoingRequest<v1::StatementFetchingRequest>),
+	/// Requests for notifying about an ongoing dispute.
+	DisputeSending(OutgoingRequest<v1::DisputeRequest>),
 }
 
 impl Requests {
@@ -65,6 +69,7 @@ impl Requests {
 			Self::PoVFetching(_) => Protocol::PoVFetching,
 			Self::AvailableDataFetching(_) => Protocol::AvailableDataFetching,
 			Self::StatementFetching(_) => Protocol::StatementFetching,
+			Self::DisputeSending(_) => Protocol::DisputeSending,
 		}
 	}
 
@@ -82,12 +87,13 @@ impl Requests {
 			Self::PoVFetching(r) => r.encode_request(),
 			Self::AvailableDataFetching(r) => r.encode_request(),
 			Self::StatementFetching(r) => r.encode_request(),
+			Self::DisputeSending(r) => r.encode_request(),
 		}
 	}
 }
 
 /// Potential recipients of an outgoing request.
-#[derive(Debug, Eq, Hash, PartialEq)]
+#[derive(Debug, Eq, Hash, PartialEq, Clone)]
 pub enum Recipient {
 	/// Recipient is a regular peer and we know its peer id.
 	Peer(PeerId),
@@ -131,6 +137,18 @@ pub enum RequestError {
 	Canceled(#[source] oneshot::Canceled),
 }
 
+/// Things that can go wrong when decoding an incoming request.
+#[derive(Debug, Error)]
+pub enum ReceiveError {
+	/// Decoding failed; we were able to change the peer's reputation accordingly.
+	#[error("Decoding request failed for peer {0}.")]
+	DecodingError(PeerId, #[source] DecodingError),
+
+	/// Decoding failed, but sending reputation change failed.
+	#[error("Decoding request failed for peer {0}, and changing reputation failed.")]
+	DecodingErrorNoReputationChange(PeerId, #[source] DecodingError),
+}
+
 /// Responses received for an `OutgoingRequest`.
 pub type OutgoingResult<Res> = Result<Res, RequestError>;
 
@@ -205,43 +223,22 @@ pub struct IncomingRequest<Req> {
 	pub peer: PeerId,
 	/// The sent request.
 	pub payload: Req,
-	pending_response: oneshot::Sender<netconfig::OutgoingResponse>,
+	/// Sender for sending response back.
+	pub pending_response: OutgoingResponseSender<Req>,
 }
 
-/// Typed variant of [`netconfig::OutgoingResponse`].
-///
-/// Responses to `IncomingRequest`s.
-pub struct OutgoingResponse<Response> {
-	/// The payload of the response.
-	pub result: Result<Response, ()>,
-
-	/// Reputation changes accrued while handling the request. To be applied to the reputation of
-	/// the peer sending the request.
-	pub reputation_changes: Vec<UnifiedReputationChange>,
-
-	/// If provided, the `oneshot::Sender` will be notified when the request has been sent to the
-	/// peer.
-	pub sent_feedback: Option<oneshot::Sender<()>>,
+/// Sender for sending back responses on an `IncomingRequest`.
+#[derive(Debug)]
+pub struct OutgoingResponseSender<Req> {
+	pending_response: oneshot::Sender<netconfig::OutgoingResponse>,
+	phantom: PhantomData<Req>,
 }
 
-impl<Req> IncomingRequest<Req>
+impl<Req> OutgoingResponseSender<Req> 
 where
-	Req: IsRequest,
+	Req: IsRequest + Decode,
 	Req::Response: Encode,
 {
-	/// Create new `IncomingRequest`.
-	pub fn new(
-		peer: PeerId,
-		payload: Req,
-		pending_response: oneshot::Sender<netconfig::OutgoingResponse>,
-	) -> Self {
-		Self {
-			peer,
-			payload,
-			pending_response,
-		}
-	}
-
 	/// Send the response back.
 	///
 	/// On success we return Ok(()), on error we return the not sent `Response`.
@@ -284,6 +281,100 @@ where
 	}
 }
 
+/// Typed variant of [`netconfig::OutgoingResponse`].
+///
+/// Responses to `IncomingRequest`s.
+pub struct OutgoingResponse<Response> {
+	/// The payload of the response.
+	///
+	/// `Err(())` if none is available, e.g. due to an error while handling the request.
+	pub result: Result<Response, ()>,
+
+	/// Reputation changes accrued while handling the request. To be applied to the reputation of
+	/// the peer sending the request.
+	pub reputation_changes: Vec<UnifiedReputationChange>,
+
+	/// If provided, the `oneshot::Sender` will be notified when the request has been sent to the
+	/// peer.
+	pub sent_feedback: Option<oneshot::Sender<()>>,
+}
+
+impl<Req> IncomingRequest<Req>
+where
+	Req: IsRequest + Decode,
+	Req::Response: Encode,
+{
+	/// Create new `IncomingRequest`.
+	pub fn new(
+		peer: PeerId,
+		payload: Req,
+		pending_response: oneshot::Sender<netconfig::OutgoingResponse>,
+	) -> Self {
+		Self {
+			peer,
+			payload,
+			pending_response: OutgoingResponseSender {
+				pending_response,
+				phantom: PhantomData {},
+			},
+		}
+	}
+
+	/// Try building from raw substrate request.
+	///
+	/// This function will fail if the request cannot be decoded and will apply passed in
+	/// reputation changes in that case.
+	///
+	/// Params:
+	///		- The raw request to decode
+	///		- Reputation changes to apply for the peer in case decoding fails.
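+	///
+	/// A usage sketch, mirroring the statement distribution responder in this PR
+	/// (`raw` is an `sc_network::config::IncomingRequest` and `COST_INVALID_REQUEST`
+	/// a `UnifiedReputationChange`, both assumed to be in scope):
+	///
+	/// ```ignore
+	/// let req = match IncomingRequest::<StatementFetchingRequest>::try_from_raw(
+	/// 	raw,
+	/// 	vec![COST_INVALID_REQUEST],
+	/// ) {
+	/// 	Ok(req) => req,
+	/// 	// A reputation change has already been sent (or at least attempted) for us:
+	/// 	Err(err) => {
+	/// 		tracing::debug!(?err, "Decoding request failed");
+	/// 		continue
+	/// 	}
+	/// };
+	/// ```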
+	pub fn try_from_raw(
+		raw: sc_network::config::IncomingRequest,
+		reputation_changes: Vec<UnifiedReputationChange>
+	) -> Result<Self, ReceiveError> {
+		let sc_network::config::IncomingRequest {
+			payload,
+			peer,
+			pending_response,
+		} = raw;
+		let payload = match Req::decode(&mut payload.as_ref()) {
+			Ok(payload) => payload,
+			Err(err) => {
+				let reputation_changes = reputation_changes
+					.into_iter()
+					.map(|r| r.into_base_rep())
+					.collect();
+				let response = sc_network::config::OutgoingResponse {
+					result: Err(()),
+					reputation_changes,
+					sent_feedback: None,
+				};
+
+				if let Err(_) = pending_response.send(response) {
+					return Err(ReceiveError::DecodingErrorNoReputationChange(peer, err))
+				}
+				return Err(ReceiveError::DecodingError(peer, err))
+			}
+		};
+		Ok(Self::new(peer, payload, pending_response))
+	}
+
+	/// Send the response back.
+	///
+	/// Calls [`OutgoingResponseSender::send_response`].
+	pub fn send_response(self, resp: Req::Response) -> Result<(), Req::Response> {
+		self.pending_response.send_response(resp)
+	}
+
+	/// Send response with additional options.
+	///
+	/// Calls [`OutgoingResponseSender::send_outgoing_response`].
+	pub fn send_outgoing_response(self, resp: OutgoingResponse<<Req as IsRequest>::Response>)
+		-> Result<(), ()> {
+		self.pending_response.send_outgoing_response(resp)
+	}
+}
+
 /// Future for actually receiving a typed response for an OutgoingRequest.
 async fn receive_response<Req>(
 	rec: oneshot::Receiver<Result<Vec<u8>, network::RequestFailure>>,
diff --git a/polkadot/node/network/protocol/src/request_response/v1.rs b/polkadot/node/network/protocol/src/request_response/v1.rs
index 89c7327735523d3c2237e5c893a6b338c4365e6b..15307d3362b7c4c5251b44fa2eee6c5d99dd557d 100644
--- a/polkadot/node/network/protocol/src/request_response/v1.rs
+++ b/polkadot/node/network/protocol/src/request_response/v1.rs
@@ -20,7 +20,7 @@ use parity_scale_codec::{Decode, Encode};
 
 use polkadot_primitives::v1::{CandidateHash, CandidateReceipt, CommittedCandidateReceipt, Hash, ValidatorIndex};
 use polkadot_primitives::v1::Id as ParaId;
-use polkadot_node_primitives::{AvailableData, PoV, ErasureChunk};
+use polkadot_node_primitives::{AvailableData, DisputeMessage, ErasureChunk, PoV, UncheckedDisputeMessage};
 
 use super::request::IsRequest;
 use super::Protocol;
@@ -192,3 +192,28 @@ impl IsRequest for StatementFetchingRequest {
 	type Response = StatementFetchingResponse;
 	const PROTOCOL: Protocol = Protocol::StatementFetching;
 }
+
+/// A dispute request.
+///
+/// Contains an invalid vote and a valid one for a particular candidate in a given session.
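+///
+/// A conversion sketch (`message` is assumed to be a checked `DisputeMessage`):
+///
+/// ```ignore
+/// let request: DisputeRequest = message.into();
+/// let bytes = request.encode();
+/// ```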
+#[derive(Clone, Encode, Decode, Debug)]
+pub struct DisputeRequest(pub UncheckedDisputeMessage);
+
+impl From<DisputeMessage> for DisputeRequest {
+	fn from(msg: DisputeMessage) -> Self {
+		Self(msg.into())
+	}
+}
+
+/// Possible responses to a `DisputeRequest`.
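+///
+/// Encoding sketch (SCALE; the unit variant `Confirmed` encodes to its single index byte):
+///
+/// ```ignore
+/// assert_eq!(DisputeResponse::Confirmed.encode(), vec![0u8]);
+/// ```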
+#[derive(Encode, Decode, Debug, PartialEq, Eq)]
+pub enum DisputeResponse {
+	/// Recipient successfully processed the dispute request.
+	#[codec(index = 0)]
+	Confirmed
+}
+
+impl IsRequest for DisputeRequest {
+	type Response = DisputeResponse;
+	const PROTOCOL: Protocol = Protocol::DisputeSending;
+}
diff --git a/polkadot/node/network/statement-distribution/src/error.rs b/polkadot/node/network/statement-distribution/src/error.rs
index 097b3709997577399cf4a259ae808c5217dbd319..af1011ebd5ea177f232dad06459085870193c635 100644
--- a/polkadot/node/network/statement-distribution/src/error.rs
+++ b/polkadot/node/network/statement-distribution/src/error.rs
@@ -60,11 +60,11 @@ impl From<runtime::Error> for Error {
 #[derive(Debug, Error)]
 pub enum Fatal {
 	/// Requester channel is never closed.
-	#[error("Requester receiver stream finished.")]
+	#[error("Requester receiver stream finished")]
 	RequesterReceiverFinished,
 
 	/// Responder channel is never closed.
-	#[error("Responder receiver stream finished.")]
+	#[error("Responder receiver stream finished")]
 	ResponderReceiverFinished,
 
 	/// Spawning a running task failed.
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index 6588d996ce2215a4737a164bcc7fe403fa6f7ab6..81e87d8ba35c00257e878d37f125f3474fd54b1d 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -580,7 +580,7 @@ struct FetchingInfo {
 }
 
 /// Messages to be handled in this subsystem.
-enum Message {
+enum MuxedMessage {
 	/// Messages from other subsystems.
 	Subsystem(FatalResult<FromOverseer<StatementDistributionMessage>>),
 	/// Messages from spawned requester background tasks.
@@ -589,12 +589,12 @@ enum Message {
 	Responder(Option<ResponderMessage>)
 }
 
-impl Message {
+impl MuxedMessage {
 	async fn receive(
 		ctx: &mut (impl SubsystemContext<Message = StatementDistributionMessage> + overseer::SubsystemContext<Message = StatementDistributionMessage>),
 		from_requester: &mut mpsc::Receiver<RequesterMessage>,
 		from_responder: &mut mpsc::Receiver<ResponderMessage>,
-	) -> Message {
+	) -> MuxedMessage {
 		// We are only fusing here to make `select` happy, in reality we will quit if one of those
 		// streams end:
 		let from_overseer = ctx.recv().fuse();
@@ -602,9 +602,9 @@ impl Message {
 		let from_responder = from_responder.next();
 		futures::pin_mut!(from_overseer, from_requester, from_responder);
 		futures::select! {
-			msg = from_overseer => Message::Subsystem(msg.map_err(Fatal::SubsystemReceive)),
-			msg = from_requester => Message::Requester(msg),
-			msg = from_responder => Message::Responder(msg),
+			msg = from_overseer => MuxedMessage::Subsystem(msg.map_err(Fatal::SubsystemReceive)),
+			msg = from_requester => MuxedMessage::Requester(msg),
+			msg = from_responder => MuxedMessage::Responder(msg),
 		}
 	}
 }
@@ -1614,9 +1614,9 @@ impl StatementDistribution {
 		let (res_sender, mut res_receiver) = mpsc::channel(1);
 
 		loop {
-			let message = Message::receive(&mut ctx, &mut req_receiver, &mut res_receiver).await;
+			let message = MuxedMessage::receive(&mut ctx, &mut req_receiver, &mut res_receiver).await;
 			match message {
-				Message::Subsystem(result) => {
+				MuxedMessage::Subsystem(result) => {
 					let result = self.handle_subsystem_message(
 						&mut ctx,
 						&mut runtime,
@@ -1637,7 +1637,7 @@ impl StatementDistribution {
 							tracing::debug!(target: LOG_TARGET, ?error)
 					}
 				}
-				Message::Requester(result) => {
+				MuxedMessage::Requester(result) => {
 					let result = self.handle_requester_message(
 						&mut ctx,
 						&gossip_peers,
@@ -1649,7 +1649,7 @@ impl StatementDistribution {
 					.await;
 					log_error(result.map_err(From::from), "handle_requester_message")?;
 				}
-				Message::Responder(result) => {
+				MuxedMessage::Responder(result) => {
 					let result = self.handle_responder_message(
 						&peers,
 						&mut active_heads,
@@ -1856,8 +1856,8 @@ impl StatementDistribution {
 						"New active leaf",
 					);
 
-					let session_index = runtime.get_session_index(ctx, relay_parent).await?;
-					let info = runtime.get_session_info_by_index(ctx, relay_parent, session_index).await?;
+					let session_index = runtime.get_session_index(ctx.sender(), relay_parent).await?;
+					let info = runtime.get_session_info_by_index(ctx.sender(), relay_parent, session_index).await?;
 					let session_info = &info.session_info;
 
 					active_heads.entry(relay_parent)
@@ -1899,7 +1899,7 @@ impl StatementDistribution {
 						}
 					}
 
-					let info = runtime.get_session_info(ctx, relay_parent).await?;
+					let info = runtime.get_session_info(ctx.sender(), relay_parent).await?;
 					let session_info = &info.session_info;
 					let validator_info = &info.validator_info;
 
diff --git a/polkadot/node/network/statement-distribution/src/responder.rs b/polkadot/node/network/statement-distribution/src/responder.rs
index da7e914edc93bb961a5f86635c9d39867a8b0782..bcaefdfcde52f2458c8ab12aeb486785ef572502 100644
--- a/polkadot/node/network/statement-distribution/src/responder.rs
+++ b/polkadot/node/network/statement-distribution/src/responder.rs
@@ -16,8 +16,6 @@
 
 use futures::{SinkExt, StreamExt, channel::{mpsc, oneshot}, stream::FuturesUnordered};
 
-use parity_scale_codec::Decode;
-
 use polkadot_node_network_protocol::{
 	PeerId, UnifiedReputationChange as Rep,
 	request_response::{
@@ -85,35 +83,26 @@ pub async fn respond(
 			Some(v) => v,
 		};
 
-		let sc_network::config::IncomingRequest {
-			payload,
-			peer,
-			pending_response,
-		} = raw;
-
-		let payload = match StatementFetchingRequest::decode(&mut payload.as_ref()) {
+		let req =
+			match IncomingRequest::<StatementFetchingRequest>::try_from_raw(
+				raw,
+				vec![COST_INVALID_REQUEST],
+			) {
 			Err(err) => {
 				tracing::debug!(
 					target: LOG_TARGET,
 					?err,
 					"Decoding request failed"
 				);
-				report_peer(pending_response, COST_INVALID_REQUEST);
 				continue
 			}
 			Ok(payload) => payload,
 		};
 
-		let req = IncomingRequest::new(
-			peer,
-			payload,
-			pending_response
-		);
-
 		let (tx, rx) = oneshot::channel();
 		if let Err(err) = sender.feed(
 			ResponderMessage::GetData {
-				requesting_peer: peer,
+				requesting_peer: req.peer,
 				relay_parent: req.payload.relay_parent,
 				candidate_hash: req.payload.candidate_hash,
 				tx,
@@ -152,20 +141,3 @@ pub async fn respond(
 		}
 	}
 }
-
-/// Report peer who sent us a request.
-fn report_peer(
-	tx: oneshot::Sender<sc_network::config::OutgoingResponse>,
-	rep: Rep,
-) {
-	if let Err(_) = tx.send(sc_network::config::OutgoingResponse {
-		result: Err(()),
-		reputation_changes: vec![rep.into_base_rep()],
-		sent_feedback: None,
-	}) {
-		tracing::debug!(
-			target: LOG_TARGET,
-			"Reporting peer failed."
-		);
-	}
-}
diff --git a/polkadot/node/network/statement-distribution/src/tests.rs b/polkadot/node/network/statement-distribution/src/tests.rs
index 32a7eb77a168bac79056837f0d8e80ec629bf5c1..a59957d8af8261614352fd135afd609d742bfa6c 100644
--- a/polkadot/node/network/statement-distribution/src/tests.rs
+++ b/polkadot/node/network/statement-distribution/src/tests.rs
@@ -18,6 +18,7 @@ use std::time::Duration;
 use std::sync::Arc;
 use std::iter::FromIterator as _;
 use parity_scale_codec::{Decode, Encode};
+use polkadot_node_subsystem_test_helpers::mock::make_ferdie_keystore;
 use super::*;
 use sp_keyring::Sr25519Keyring;
 use sp_application_crypto::{AppKey, sr25519::Pair, Pair as TraitPair};
@@ -1704,14 +1705,3 @@ fn make_session_info(validators: Vec<Pair>, groups: Vec<Vec<u32>>) -> SessionInf
 		needed_approvals: 0,
 	}
 }
-
-pub fn make_ferdie_keystore() -> SyncCryptoStorePtr {
-	let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
-	SyncCryptoStore::sr25519_generate_new(
-		&*keystore,
-		ValidatorId::ID,
-		Some(&Sr25519Keyring::Ferdie.to_seed()),
-		)
-		.expect("Insert key into keystore");
-	keystore
-}
diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs
index 7867ed309aecbe6983682967eac608c1bdf53a5c..ab30b9557df538cc10718870fb4bfc4a24f9414e 100644
--- a/polkadot/node/overseer/src/lib.rs
+++ b/polkadot/node/overseer/src/lib.rs
@@ -91,6 +91,7 @@ use polkadot_node_subsystem_types::messages::{
 	ApprovalVotingMessage, GossipSupportMessage,
 	NetworkBridgeEvent,
 	DisputeParticipationMessage, DisputeCoordinatorMessage, ChainSelectionMessage,
+	DisputeDistributionMessage,
 };
 pub use polkadot_node_subsystem_types::{
 	OverseerSignal,
@@ -395,6 +396,9 @@ pub struct Overseer<SupportsParachains> {
 	#[subsystem(no_dispatch, wip, DisputeParticipationMessage)]
 	dispute_participation: DisputeParticipation,
 
+	#[subsystem(no_dispatch, wip, DisputeDistributionMessage)]
+	dispute_distribution: DisputeDistribution,
+
 	#[subsystem(no_dispatch, wip, ChainSelectionMessage)]
 	chain_selection: ChainSelection,
 
diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml
index 7cefeab9ba036945704e01b7b4f71bf3dcdbfae5..40e1f79ccde6fae26e51c86cc90f2ce057a15bda 100644
--- a/polkadot/node/primitives/Cargo.toml
+++ b/polkadot/node/primitives/Cargo.toml
@@ -20,6 +20,7 @@ sp-maybe-compressed-blob  = { git = "https://github.com/paritytech/substrate", b
 polkadot-parachain = { path = "../../parachain", default-features = false }
 schnorrkel = "0.9.1"
 thiserror = "1.0.22"
+tracing = "0.1.26"
 serde = { version = "1.0.123", features = ["derive"] }
 
 [target.'cfg(not(target_os = "unknown"))'.dependencies]
diff --git a/polkadot/node/primitives/src/disputes/message.rs b/polkadot/node/primitives/src/disputes/message.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d703eb456ce8773d67df3f9fe288ef9ff9f42384
--- /dev/null
+++ b/polkadot/node/primitives/src/disputes/message.rs
@@ -0,0 +1,265 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! `DisputeMessage` and associated types.
+//!
+//! A `DisputeMessage` is a message that indicates a node is participating in a dispute. It is
+//! used for interfacing with `DisputeDistribution` to send out our vote in a way that keeps spam
+//! detectable.
+
+use thiserror::Error;
+
+use parity_scale_codec::{Decode, Encode};
+
+use polkadot_primitives::v1::{CandidateReceipt, DisputeStatement, SessionIndex, SessionInfo, ValidatorIndex};
+
+use super::{InvalidDisputeVote, SignedDisputeStatement, ValidDisputeVote};
+
+/// A dispute initiating/participating message that is guaranteed to have been built from signed
+/// statements.
+///
+/// It has also, most likely, been constructed correctly. This is used with
+/// `DisputeDistributionMessage::SendDispute` for sending out votes.
+#[derive(Debug, Clone)]
+pub struct DisputeMessage(UncheckedDisputeMessage);
+
+/// A `DisputeMessage` where signatures of statements have not yet been checked.
+#[derive(Clone, Encode, Decode, Debug)]
+pub struct UncheckedDisputeMessage {
+	/// The candidate being disputed.
+	pub candidate_receipt: CandidateReceipt,
+
+	/// The session the candidate appears in.
+	pub session_index: SessionIndex,
+
+	/// The invalid vote data that makes up this dispute.
+	pub invalid_vote: InvalidDisputeVote,
+
+	/// The valid vote that makes this dispute request valid.
+	pub valid_vote: ValidDisputeVote,
+}
+
+/// Things that can go wrong when constructing a `DisputeMessage`.
+#[derive(Error, Debug)]
+pub enum Error {
+	/// The statements concerned different candidates.
+	#[error("Candidate hashes of the two votes did not match up")]
+	CandidateHashMismatch,
+
+	/// The statements concerned different sessions.
+	#[error("Session indices of the two votes did not match up")]
+	SessionIndexMismatch,
+
+	/// The valid statement validator key did not correspond to passed in `SessionInfo`.
+	#[error("Valid statement validator key did not match session information")]
+	InvalidValidKey,
+
+	/// The invalid statement validator key did not correspond to passed in `SessionInfo`.
+	#[error("Invalid statement validator key did not match session information")]
+	InvalidInvalidKey,
+
+	/// Provided receipt had different hash than the `CandidateHash` in the signed statements.
+	#[error("Hash of candidate receipt did not match provided hash")]
+	InvalidCandidateReceipt,
+
+	/// Valid statement should have `ValidDisputeStatementKind`.
+	#[error("Valid statement has kind `invalid`")]
+	ValidStatementHasInvalidKind,
+
+	/// Invalid statement should have `InvalidDisputeStatementKind`.
+	#[error("Invalid statement has kind `valid`")]
+	InvalidStatementHasValidKind,
+
+	/// Provided index could not be found in `SessionInfo`.
+	#[error("The valid statement had an invalid validator index")]
+	ValidStatementInvalidValidatorIndex,
+
+	/// Provided index could not be found in `SessionInfo`.
+	#[error("The invalid statement had an invalid validator index")]
+	InvalidStatementInvalidValidatorIndex,
+}
+
+impl DisputeMessage {
+	/// Build a `DisputeMessage` and check what can be checked.
+	///
+	/// This function checks that:
+	///
+	/// - both statements concern the same candidate
+	/// - both statements concern the same session
+	/// - the invalid statement is indeed an invalid one
+	/// - the valid statement is indeed a valid one
+	/// - the passed `CandidateReceipt` has the correct hash (as signed in the statements)
+	/// - the given validator indices match the given `ValidatorId`s in the statements,
+	///   given a `SessionInfo`.
+	///
+	/// We don't check whether the given `SessionInfo` matches the `SessionIndex` in the
+	/// statements, because we can't without doing a runtime query. Nevertheless this smart
+	/// constructor gives relatively strong guarantees that the resulting `DisputeMessage` is
+	/// valid and good. Even the passed `SessionInfo` is most likely right if this function
+	/// returns `Ok`, because otherwise the passed `ValidatorId`s in the `SessionInfo` at
+	/// their given indices would very likely not match the `ValidatorId`s in the statements.
+	///
+	/// In summary, this smart constructor should prevent almost all programming errors one
+	/// could realistically make here.
+	pub fn from_signed_statements(
+		valid_statement: SignedDisputeStatement,
+		valid_index: ValidatorIndex,
+		invalid_statement: SignedDisputeStatement,
+		invalid_index: ValidatorIndex,
+		candidate_receipt: CandidateReceipt,
+		session_info: &SessionInfo,
+	) -> Result<Self, Error> {
+		let candidate_hash = *valid_statement.candidate_hash();
+		// Check statements concern same candidate:
+		if candidate_hash != *invalid_statement.candidate_hash() {
+			return Err(Error::CandidateHashMismatch)
+		}
+
+		let session_index = valid_statement.session_index();
+		if session_index != invalid_statement.session_index() {
+			return Err(Error::SessionIndexMismatch)
+		}
+
+		let valid_id = session_info
+			.validators
+			.get(valid_index.0 as usize)
+			.ok_or(Error::ValidStatementInvalidValidatorIndex)?;
+		let invalid_id = session_info
+			.validators
+			.get(invalid_index.0 as usize)
+			.ok_or(Error::InvalidStatementInvalidValidatorIndex)?;
+
+		if valid_id != valid_statement.validator_public() {
+			return Err(Error::InvalidValidKey)
+		}
+
+		if invalid_id != invalid_statement.validator_public() {
+			return Err(Error::InvalidInvalidKey)
+		}
+
+		if candidate_receipt.hash() != candidate_hash {
+			return Err(Error::InvalidCandidateReceipt)
+		}
+
+		let valid_kind = match valid_statement.statement() {
+			DisputeStatement::Valid(v) => v,
+			_ => {
+				return Err(Error::ValidStatementHasInvalidKind)
+			}
+		};
+
+		let invalid_kind = match invalid_statement.statement() {
+			DisputeStatement::Invalid(v) => v,
+			_ => {
+				return Err(Error::InvalidStatementHasValidKind)
+			}
+		};
+
+		let valid_vote = ValidDisputeVote {
+			validator_index: valid_index,
+			signature: valid_statement.validator_signature().clone(),
+			kind: valid_kind.clone(),
+		};
+
+		let invalid_vote = InvalidDisputeVote {
+			validator_index: invalid_index,
+			signature: invalid_statement.validator_signature().clone(),
+			kind: invalid_kind.clone(),
+		};
+
+		Ok(DisputeMessage(UncheckedDisputeMessage {
+			candidate_receipt,
+			session_index,
+			valid_vote,
+			invalid_vote,
+		}))
+	}
+
+	/// Read only access to the candidate receipt.
+	pub fn candidate_receipt(&self) -> &CandidateReceipt {
+		&self.0.candidate_receipt
+	}
+
+	/// Read only access to the `SessionIndex`.
+	pub fn session_index(&self) -> SessionIndex {
+		self.0.session_index
+	}
+
+	/// Read only access to the invalid vote.
+	pub fn invalid_vote(&self) -> &InvalidDisputeVote {
+		&self.0.invalid_vote
+	}
+
+	/// Read only access to the valid vote.
+	pub fn valid_vote(&self) -> &ValidDisputeVote {
+		&self.0.valid_vote
+	}
+}
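+
+// Illustrative usage (a sketch, not part of the API surface): `valid_statement` and
+// `invalid_statement` are assumed to be `SignedDisputeStatement`s for the same candidate
+// and session, with `session_info` fetched via the runtime API:
+//
+// let message = DisputeMessage::from_signed_statements(
+// 	valid_statement,
+// 	valid_index,
+// 	invalid_statement,
+// 	invalid_index,
+// 	candidate_receipt,
+// 	&session_info,
+// )?;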
+
+impl UncheckedDisputeMessage {
+	/// Try to recover the two signed dispute votes from an `UncheckedDisputeMessage`.
+	pub fn try_into_signed_votes(self, session_info: &SessionInfo)
+		-> Result<(CandidateReceipt, (SignedDisputeStatement, ValidatorIndex), (SignedDisputeStatement, ValidatorIndex)), ()>
+	{
+		let Self {
+			candidate_receipt,
+			session_index,
+			valid_vote,
+			invalid_vote,
+		} = self;
+		let candidate_hash = candidate_receipt.hash();
+
+		let vote_valid = {
+			let ValidDisputeVote {
+				validator_index,
+				signature,
+				kind,
+			} = valid_vote;
+			let validator_public = session_info.validators.get(validator_index.0 as usize).ok_or(())?.clone();
+
+			(
+				SignedDisputeStatement::new_checked(
+					DisputeStatement::Valid(kind), candidate_hash, session_index, validator_public, signature
+				)?,
+				validator_index
+			)
+		};
+
+		let vote_invalid = {
+			let InvalidDisputeVote {
+				validator_index,
+				signature,
+				kind,
+			} = invalid_vote;
+			let validator_public = session_info.validators.get(validator_index.0 as usize).ok_or(())?.clone();
+
+			(
+				SignedDisputeStatement::new_checked(
+					DisputeStatement::Invalid(kind), candidate_hash, session_index, validator_public, signature
+				)?,
+				validator_index
+			)
+		};
+
+		Ok((candidate_receipt, vote_valid, vote_invalid))
+	}
+}
+
+impl From<DisputeMessage> for UncheckedDisputeMessage {
+	fn from(message: DisputeMessage) -> Self {
+		message.0
+	}
+}
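+
+// Round-trip sketch: convert a checked message into its unchecked form for sending, then
+// re-check the signatures on the receiving side (assumes a `session_info` matching the
+// message's session is in scope):
+//
+// let unchecked: UncheckedDisputeMessage = message.into();
+// let (receipt, valid, invalid) = unchecked
+// 	.try_into_signed_votes(&session_info)
+// 	.expect("signatures were valid when the message was constructed");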
diff --git a/polkadot/node/primitives/src/disputes/mod.rs b/polkadot/node/primitives/src/disputes/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..faae7f2ffb1cbe9d474002ff71511bcbf365f98f
--- /dev/null
+++ b/polkadot/node/primitives/src/disputes/mod.rs
@@ -0,0 +1,177 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::convert::TryInto;
+
+use parity_scale_codec::{Decode, Encode};
+
+use sp_application_crypto::AppKey;
+use sp_keystore::{CryptoStore, SyncCryptoStorePtr, Error as KeystoreError};
+
+use polkadot_primitives::v1::{
+	CandidateHash, CandidateReceipt, DisputeStatement, InvalidDisputeStatementKind, SessionIndex,
+	ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature,
+};
+
+/// `DisputeMessage` and related types.
+mod message;
+pub use message::{DisputeMessage, UncheckedDisputeMessage, Error as DisputeMessageCheckError};
+
+/// A checked dispute statement from an associated validator.
+#[derive(Debug, Clone)]
+pub struct SignedDisputeStatement {
+	dispute_statement: DisputeStatement,
+	candidate_hash: CandidateHash,
+	validator_public: ValidatorId,
+	validator_signature: ValidatorSignature,
+	session_index: SessionIndex,
+}
+
+/// Tracked votes on candidates, for the purposes of dispute resolution.
+#[derive(Debug, Clone)]
+pub struct CandidateVotes {
+	/// The receipt of the candidate itself.
+	pub candidate_receipt: CandidateReceipt,
+	/// Votes of validity, sorted by validator index.
+	pub valid: Vec<(ValidDisputeStatementKind, ValidatorIndex, ValidatorSignature)>,
+	/// Votes of invalidity, sorted by validator index.
+	pub invalid: Vec<(InvalidDisputeStatementKind, ValidatorIndex, ValidatorSignature)>,
+}
+
+impl CandidateVotes {
+	/// Get the set of all validators who have votes in the set, ascending.
+	pub fn voted_indices(&self) -> Vec<ValidatorIndex> {
+		let mut v: Vec<_> = self.valid.iter().map(|x| x.1).chain(
+			self.invalid.iter().map(|x| x.1)
+		).collect();
+
+		v.sort();
+		v.dedup();
+
+		v
+	}
+}
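+
+// Worked example (sketch): given valid votes from validators 1 and 3 and an invalid vote
+// from validator 3, `voted_indices()` yields `[ValidatorIndex(1), ValidatorIndex(3)]`:
+// ascending and deduplicated.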
+
+impl SignedDisputeStatement {
+	/// Create a new `SignedDisputeStatement`, which is only possible by checking the signature.
+	pub fn new_checked(
+		dispute_statement: DisputeStatement,
+		candidate_hash: CandidateHash,
+		session_index: SessionIndex,
+		validator_public: ValidatorId,
+		validator_signature: ValidatorSignature,
+	) -> Result<Self, ()> {
+		dispute_statement.check_signature(
+			&validator_public,
+			candidate_hash,
+			session_index,
+			&validator_signature,
+		).map(|_| SignedDisputeStatement {
+			dispute_statement,
+			candidate_hash,
+			validator_public,
+			validator_signature,
+			session_index,
+		})
+	}
+
+	/// Sign this statement with the given keystore and key. Pass `valid = true` to
+	/// indicate validity of the candidate, and `valid = false` to indicate invalidity.
+	pub async fn sign_explicit(
+		keystore: &SyncCryptoStorePtr,
+		valid: bool,
+		candidate_hash: CandidateHash,
+		session_index: SessionIndex,
+		validator_public: ValidatorId,
+	) -> Result<Option<Self>, KeystoreError> {
+		let dispute_statement = if valid {
+			DisputeStatement::Valid(ValidDisputeStatementKind::Explicit)
+		} else {
+			DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit)
+		};
+
+		let data = dispute_statement.payload_data(candidate_hash, session_index);
+		let signature = CryptoStore::sign_with(
+			&**keystore,
+			ValidatorId::ID,
+			&validator_public.clone().into(),
+			&data,
+		).await?;
+
+		let signature = match signature {
+			Some(sig) => sig.try_into().map_err(|_| KeystoreError::KeyNotSupported(ValidatorId::ID))?,
+			None => return Ok(None),
+		};
+
+		Ok(Some(Self {
+			dispute_statement,
+			candidate_hash,
+			validator_public,
+			validator_signature: signature,
+			session_index,
+		}))
+	}
+
+	/// Access the underlying dispute statement
+	pub fn statement(&self) -> &DisputeStatement {
+		&self.dispute_statement
+	}
+
+	/// Access the underlying candidate hash.
+	pub fn candidate_hash(&self) -> &CandidateHash {
+		&self.candidate_hash
+	}
+
+	/// Access the underlying validator public key.
+	pub fn validator_public(&self) -> &ValidatorId {
+		&self.validator_public
+	}
+
+	/// Access the underlying validator signature.
+	pub fn validator_signature(&self) -> &ValidatorSignature {
+		&self.validator_signature
+	}
+
+	/// Access the underlying session index.
+	pub fn session_index(&self) -> SessionIndex {
+		self.session_index
+	}
+}
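+
+// Signing sketch (assumes `keystore` holds the private key belonging to
+// `validator_public`; `sign_explicit` returns `Ok(None)` otherwise):
+//
+// let signed = SignedDisputeStatement::sign_explicit(
+// 	&keystore,
+// 	false, // we consider the candidate invalid
+// 	candidate_hash,
+// 	session_index,
+// 	validator_public,
+// ).await?;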
+
+/// Any invalid vote (currently only explicit).
+#[derive(Clone, Encode, Decode, Debug)]
+pub struct InvalidDisputeVote {
+	/// The voting validator index.
+	pub validator_index: ValidatorIndex,
+
+	/// The validator signature, which can be verified when constructing a
+	/// `SignedDisputeStatement`.
+	pub signature: ValidatorSignature,
+
+	/// Kind of dispute statement.
+	pub kind: InvalidDisputeStatementKind,
+}
+
+/// Any valid vote (backing, approval, explicit).
+#[derive(Clone, Encode, Decode, Debug)]
+pub struct ValidDisputeVote {
+	/// The voting validator index.
+	pub validator_index: ValidatorIndex,
+
+	/// The validator signature, which can be verified when constructing a
+	/// `SignedDisputeStatement`.
+	pub signature: ValidatorSignature,
+
+	/// Kind of dispute statement.
+	pub kind: ValidDisputeStatementKind,
+}
diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs
index 490367db4ccabbe1d085581693780c0b3c4ca877..88449e133ddd037fc5ab92f7556a08f5ab06f6f8 100644
--- a/polkadot/node/primitives/src/lib.rs
+++ b/polkadot/node/primitives/src/lib.rs
@@ -22,14 +22,12 @@
 
 #![deny(missing_docs)]
 
-use std::convert::TryInto;
+
 use std::pin::Pin;
 
 use serde::{Serialize, Deserialize};
 use futures::Future;
 use parity_scale_codec::{Decode, Encode};
-use sp_keystore::{CryptoStore, SyncCryptoStorePtr, Error as KeystoreError};
-use sp_application_crypto::AppKey;
 
 pub use sp_core::traits::SpawnNamed;
 pub use sp_consensus_babe::{
@@ -40,20 +38,32 @@ use polkadot_primitives::v1::{
 	BlakeTwo256, CandidateCommitments, CandidateHash, CollatorPair, CommittedCandidateReceipt,
 	CompactStatement, EncodeAs, Hash, HashT, HeadData, Id as ParaId, OutboundHrmpMessage,
 	PersistedValidationData, Signed, UncheckedSigned, UpwardMessage, ValidationCode,
-	ValidatorIndex, ValidatorSignature, ValidDisputeStatementKind, InvalidDisputeStatementKind,
-	CandidateReceipt, ValidatorId, SessionIndex, DisputeStatement, MAX_CODE_SIZE, MAX_POV_SIZE,
+	ValidatorIndex, SessionIndex, MAX_CODE_SIZE, MAX_POV_SIZE,
 };
 
 pub use polkadot_parachain::primitives::BlockData;
 
 pub mod approval;
 
+/// Disputes related types.
+pub mod disputes;
+pub use disputes::{
+	SignedDisputeStatement, UncheckedDisputeMessage, DisputeMessage, CandidateVotes, InvalidDisputeVote, ValidDisputeVote,
+	DisputeMessageCheckError,
+};
+
 /// The bomb limit for decompressing code blobs.
 pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize;
 
 /// The bomb limit for decompressing PoV blobs.
 pub const POV_BOMB_LIMIT: usize = (MAX_POV_SIZE * 4u32) as usize;
 
+/// Number of sessions we want to consider in disputes.
+///
+/// It would be nice to draw this from the chain state, but we have no tools for it right now.
+/// With roughly 4-hour sessions on Polkadot this window spans about one day; with 1-hour
+/// sessions on Kusama it spans about 6 hours.
+pub const DISPUTE_WINDOW: SessionIndex = 6;
+
 /// The cumulative weight of a block in a fork-choice rule.
 pub type BlockWeight = u32;
 
@@ -283,125 +293,3 @@ pub fn maybe_compress_pov(pov: PoV) -> PoV {
 	let pov = PoV { block_data: BlockData(raw) };
 	pov
 }
-
-/// Tracked votes on candidates, for the purposes of dispute resolution.
-#[derive(Debug, Clone)]
-pub struct CandidateVotes {
-	/// The receipt of the candidate itself.
-	pub candidate_receipt: CandidateReceipt,
-	/// Votes of validity, sorted by validator index.
-	pub valid: Vec<(ValidDisputeStatementKind, ValidatorIndex, ValidatorSignature)>,
-	/// Votes of invalidity, sorted by validator index.
-	pub invalid: Vec<(InvalidDisputeStatementKind, ValidatorIndex, ValidatorSignature)>,
-}
-
-impl CandidateVotes {
-	/// Get the set of all validators who have votes in the set, ascending.
-	pub fn voted_indices(&self) -> Vec<ValidatorIndex> {
-		let mut v: Vec<_> = self.valid.iter().map(|x| x.1).chain(
-			self.invalid.iter().map(|x| x.1)
-		).collect();
-
-		v.sort();
-		v.dedup();
-
-		v
-	}
-}
-
-
-/// A checked dispute statement from an associated validator.
-#[derive(Debug, Clone)]
-pub struct SignedDisputeStatement {
-	dispute_statement: DisputeStatement,
-	candidate_hash: CandidateHash,
-	validator_public: ValidatorId,
-	validator_signature: ValidatorSignature,
-	session_index: SessionIndex,
-}
-
-impl SignedDisputeStatement {
-	/// Create a new `SignedDisputeStatement`, which is only possible by checking the signature.
-	pub fn new_checked(
-		dispute_statement: DisputeStatement,
-		candidate_hash: CandidateHash,
-		session_index: SessionIndex,
-		validator_public: ValidatorId,
-		validator_signature: ValidatorSignature,
-	) -> Result<Self, ()> {
-		dispute_statement.check_signature(
-			&validator_public,
-			candidate_hash,
-			session_index,
-			&validator_signature,
-		).map(|_| SignedDisputeStatement {
-			dispute_statement,
-			candidate_hash,
-			validator_public,
-			validator_signature,
-			session_index,
-		})
-	}
-
-	/// Sign this statement with the given keystore and key. Pass `valid = true` to
-	/// indicate validity of the candidate, and `valid = false` to indicate invalidity.
-	pub async fn sign_explicit(
-		keystore: &SyncCryptoStorePtr,
-		valid: bool,
-		candidate_hash: CandidateHash,
-		session_index: SessionIndex,
-		validator_public: ValidatorId,
-	) -> Result<Option<Self>, KeystoreError> {
-		let dispute_statement = if valid {
-			DisputeStatement::Valid(ValidDisputeStatementKind::Explicit)
-		} else {
-			DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit)
-		};
-
-		let data = dispute_statement.payload_data(candidate_hash, session_index);
-		let signature = CryptoStore::sign_with(
-			&**keystore,
-			ValidatorId::ID,
-			&validator_public.clone().into(),
-			&data,
-		).await?;
-
-		let signature = match signature {
-			Some(sig) => sig.try_into().map_err(|_| KeystoreError::KeyNotSupported(ValidatorId::ID))?,
-			None => return Ok(None),
-		};
-
-		Ok(Some(Self {
-			dispute_statement,
-			candidate_hash,
-			validator_public,
-			validator_signature: signature,
-			session_index,
-		}))
-	}
-
-	/// Access the underlying dispute statement
-	pub fn statement(&self) -> &DisputeStatement {
-		&self.dispute_statement
-	}
-
-	/// Access the underlying candidate hash.
-	pub fn candidate_hash(&self) -> &CandidateHash {
-		&self.candidate_hash
-	}
-
-	/// Access the underlying validator public key.
-	pub fn validator_public(&self) -> &ValidatorId {
-		&self.validator_public
-	}
-
-	/// Access the underlying validator signature.
-	pub fn validator_signature(&self) -> &ValidatorSignature {
-		&self.validator_signature
-	}
-
-	/// Access the underlying session index.
-	pub fn session_index(&self) -> SessionIndex {
-		self.session_index
-	}
-}
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml
index 30b1bb87ad2d3335e58f36ce9838147ebd9f069a..f5938b4d473025adcef0b96c753d7913ac129e0d 100644
--- a/polkadot/node/service/Cargo.toml
+++ b/polkadot/node/service/Cargo.toml
@@ -88,13 +88,16 @@ westend-runtime = { path = "../../runtime/westend", optional = true }
 rococo-runtime = { path = "../../runtime/rococo", optional = true }
 
 # Polkadot Subsystems
+polkadot-approval-distribution = { path = "../network/approval-distribution", optional = true }
 polkadot-availability-bitfield-distribution = { path = "../network/bitfield-distribution", optional = true }
 polkadot-availability-distribution = { path = "../network/availability-distribution", optional = true }
 polkadot-availability-recovery = { path = "../network/availability-recovery", optional = true }
 polkadot-collator-protocol = { path = "../network/collator-protocol", optional = true }
+polkadot-dispute-distribution = { path = "../network/dispute-distribution", optional = true }
 polkadot-gossip-support = { path = "../network/gossip-support", optional = true }
 polkadot-network-bridge = { path = "../network/bridge", optional = true }
 polkadot-node-collation-generation = { path = "../collation-generation", optional = true }
+polkadot-node-core-approval-voting = { path = "../core/approval-voting", optional = true }
 polkadot-node-core-av-store = { path = "../core/av-store", optional = true }
 polkadot-node-core-backing = { path = "../core/backing", optional = true }
 polkadot-node-core-bitfield-signing = { path = "../core/bitfield-signing", optional = true }
@@ -103,8 +106,6 @@ polkadot-node-core-chain-api = { path = "../core/chain-api", optional = true }
 polkadot-node-core-provisioner = { path = "../core/provisioner", optional = true }
 polkadot-node-core-runtime-api = { path = "../core/runtime-api", optional = true }
 polkadot-statement-distribution = { path = "../network/statement-distribution", optional = true }
-polkadot-approval-distribution = { path = "../network/approval-distribution", optional = true }
-polkadot-node-core-approval-voting = { path = "../core/approval-voting", optional = true }
 
 [dev-dependencies]
 polkadot-test-client = { path = "../test/client" }
@@ -124,6 +125,7 @@ full-node = [
 	"polkadot-availability-distribution",
 	"polkadot-availability-recovery",
 	"polkadot-collator-protocol",
+	"polkadot-dispute-distribution",
 	"polkadot-gossip-support",
 	"polkadot-network-bridge",
 	"polkadot-node-collation-generation",
diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs
index 9c2081538f50adc92751777fdf0b433397463503..2bf6dd74716703ec2e7b5c7a576af46d9353774e 100644
--- a/polkadot/node/service/src/overseer.rs
+++ b/polkadot/node/service/src/overseer.rs
@@ -194,7 +194,7 @@ where
 		},
 		network_bridge: NetworkBridgeSubsystem::new(
 			network_service.clone(),
-			authority_discovery_service,
+			authority_discovery_service.clone(),
 			request_multiplexer,
 			Box::new(network_service.clone()),
 			Metrics::register(registry)?,
diff --git a/polkadot/node/subsystem-test-helpers/Cargo.toml b/polkadot/node/subsystem-test-helpers/Cargo.toml
index 3ca9f0cdaa8301596af4b70f875c126b63d78f0e..aefa64bd39dd91be726bc3051989a446c04e11ce 100644
--- a/polkadot/node/subsystem-test-helpers/Cargo.toml
+++ b/polkadot/node/subsystem-test-helpers/Cargo.toml
@@ -21,6 +21,10 @@ polkadot-statement-table = { path = "../../statement-table" }
 sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
 smallvec = "1.6.1"
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
 
 [dev-dependencies]
 polkadot-overseer = { path = "../overseer" }
diff --git a/polkadot/node/subsystem-test-helpers/src/lib.rs b/polkadot/node/subsystem-test-helpers/src/lib.rs
index 87b28d3963e4e1f474acae5fc57c934d22201a80..5ac6c5acdcaf835012dceb1cbaff52800aa9a7ec 100644
--- a/polkadot/node/subsystem-test-helpers/src/lib.rs
+++ b/polkadot/node/subsystem-test-helpers/src/lib.rs
@@ -38,6 +38,9 @@ use std::sync::Arc;
 use std::task::{Context, Poll, Waker};
 use std::time::Duration;
 
+/// Generally useful mock data providers for unit tests.
+pub mod mock;
+
 enum SinkState<T> {
 	Empty {
 		read_waker: Option<Waker>,
@@ -310,7 +313,7 @@ pub fn make_subsystem_context<M, S>(
 ///
 /// Pass in two async closures: one mocks the overseer, the other runs the test from the perspective of a subsystem.
 ///
-/// Times out in two seconds.
+/// Times out in 5 seconds.
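+///
+/// Usage sketch (closure arguments are illustrative):
+///
+/// ```ignore
+/// subsystem_test_harness(
+/// 	|mut overseer_handle| async move { /* mock overseer responses */ },
+/// 	|mut sender| async move { /* drive the subsystem under test */ },
+/// );
+/// ```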
 pub fn subsystem_test_harness<M, OverseerFactory, Overseer, TestFactory, Test>(
 	overseer_factory: OverseerFactory,
 	test_factory: TestFactory,
@@ -329,7 +332,7 @@ pub fn subsystem_test_harness<M, OverseerFactory, Overseer, TestFactory, Test>(
 
 	futures::executor::block_on(async move {
 		future::join(overseer, test)
-			.timeout(Duration::from_secs(2))
+			.timeout(Duration::from_secs(5))
 			.await
 			.expect("test timed out instead of completing")
 	});
diff --git a/polkadot/node/subsystem-test-helpers/src/mock.rs b/polkadot/node/subsystem-test-helpers/src/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2d8671f6dc203eb5aa3389a2ad3880e6b87c4b1a
--- /dev/null
+++ b/polkadot/node/subsystem-test-helpers/src/mock.rs
@@ -0,0 +1,46 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::Arc;
+
+use sc_keystore::LocalKeystore;
+use sp_application_crypto::AppKey;
+use sp_keyring::Sr25519Keyring;
+use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore};
+
+use polkadot_primitives::v1::{ValidatorId, AuthorityDiscoveryId};
+
+/// Get mock keystore with `Ferdie` key.
+pub fn make_ferdie_keystore() -> SyncCryptoStorePtr {
+	let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory());
+	SyncCryptoStore::sr25519_generate_new(
+		&*keystore,
+		ValidatorId::ID,
+		Some(&Sr25519Keyring::Ferdie.to_seed()),
+	)
+	.expect("Insert key into keystore");
+	SyncCryptoStore::sr25519_generate_new(
+		&*keystore,
+		AuthorityDiscoveryId::ID,
+		Some(&Sr25519Keyring::Ferdie.to_seed()),
+	)
+	.expect("Insert key into keystore");
+	keystore
+}
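+
+// Usage sketch: the returned keystore can be handed to anything expecting a
+// `SyncCryptoStorePtr`; both generated keys belong to `Ferdie`:
+//
+// let keystore = make_ferdie_keystore();
+// let keys = SyncCryptoStore::sr25519_public_keys(&*keystore, ValidatorId::ID);
+// assert_eq!(keys, vec![Sr25519Keyring::Ferdie.public()]);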
+
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index e820ab3569bbaceebd0c300c22140521d93b3ce0..0a89ed6aecc33b7952c4f5ae604a4e6ddfa36fcc 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -28,16 +28,8 @@ use thiserror::Error;
 
 pub use sc_network::IfDisconnected;
 
-use polkadot_node_network_protocol::{
-	peer_set::PeerSet,
-	request_response::{request::IncomingRequest, v1 as req_res_v1, Requests},
-	v1 as protocol_v1, PeerId, UnifiedReputationChange,
-};
-use polkadot_node_primitives::{
-	approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote},
-	AvailableData, BabeEpoch, CandidateVotes, CollationGenerationConfig, ErasureChunk, PoV,
-	SignedDisputeStatement, SignedFullStatement, ValidationResult, BlockWeight,
-};
+use polkadot_node_network_protocol::{
+	peer_set::PeerSet,
+	request_response::{request::IncomingRequest, v1 as req_res_v1, Requests},
+	v1 as protocol_v1, PeerId, UnifiedReputationChange,
+};
+use polkadot_node_primitives::{
+	approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote},
+	AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig,
+	DisputeMessage, ErasureChunk, PoV, SignedDisputeStatement, SignedFullStatement,
+	ValidationResult,
+};
 use polkadot_primitives::v1::{
 	AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateDescriptor, CandidateEvent,
 	CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt,
@@ -144,7 +136,6 @@ impl CandidateValidationMessage {
 	}
 }
 
-
 /// Messages received by the Collator Protocol subsystem.
 #[derive(Debug, derive_more::From)]
 pub enum CollatorProtocolMessage {
@@ -217,6 +208,21 @@ pub enum DisputeCoordinatorMessage {
 		/// The validator index passed alongside each statement should correspond to the index
 		/// of the validator in the set.
 		statements: Vec<(SignedDisputeStatement, ValidatorIndex)>,
+		/// Inform the requester once we finished importing.
+		///
+		/// This is sent when either:
+		/// - we discarded the votes because
+		///		- they were ancient or otherwise invalid (result: `InvalidImport`)
+		///		- or we were not able to recover availability for an unknown candidate (result:
+		///		`InvalidImport`)
+		///		- or they were already known (in that case the result will still be `ValidImport`)
+		/// - or we recorded them (`ValidImport`) because
+		///		- we already cast our own vote in that dispute
+		///		- or we have approval votes on that candidate
+		///		- or other explicit votes on that candidate were already recorded
+		///		- or we recovered availability for the candidate
+		///		- or the imported statements are backing/approval votes, which are always accepted.
+		pending_confirmation: oneshot::Sender<ImportStatementsResult>,
 	},
 	/// Fetch a list of all active disputes that the coordinator is aware of.
 	ActiveDisputes(oneshot::Sender<Vec<(SessionIndex, CandidateHash)>>),
@@ -241,6 +247,15 @@ pub enum DisputeCoordinatorMessage {
 	}
 }
 
+/// The result of `DisputeCoordinatorMessage::ImportStatements`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum ImportStatementsResult {
+	/// Import was invalid (e.g. the candidate was not available) and the sending peer should
+	/// get banned.
+	InvalidImport,
+	/// Import was valid and can be confirmed to the peer.
+	ValidImport,
+}
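+
+// Requester sketch: import statements and wait for the verdict. Field names other than
+// `statements`/`pending_confirmation` are assumed from the `ImportStatements` variant:
+//
+// let (pending_confirmation, confirmation_rx) = oneshot::channel();
+// sender.send_message(DisputeCoordinatorMessage::ImportStatements {
+// 	candidate_hash,
+// 	candidate_receipt,
+// 	session,
+// 	statements,
+// 	pending_confirmation,
+// }.into()).await;
+// match confirmation_rx.await {
+// 	Ok(ImportStatementsResult::ValidImport) => { /* confirm to the peer */ }
+// 	Ok(ImportStatementsResult::InvalidImport) | Err(_) => { /* report/ban the peer */ }
+// }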
+
 /// Messages received by the dispute participation subsystem.
 #[derive(Debug)]
 pub enum DisputeParticipationMessage {
@@ -254,9 +269,24 @@ pub enum DisputeParticipationMessage {
 		session: SessionIndex,
 		/// The number of validators in the session.
 		n_validators: u32,
+		/// Give immediate feedback on whether the candidate was available or
+		/// not.
+		report_availability: oneshot::Sender<bool>,
 	},
 }
 
+/// Messages going to the dispute distribution subsystem.
+#[derive(Debug)]
+pub enum DisputeDistributionMessage {
+	/// Tell dispute distribution to distribute an explicit dispute statement to
+	/// validators.
+	SendDispute(DisputeMessage),
+
+	/// Get a receiver for incoming network requests for dispute sending.
+	DisputeSendingReceiver(mpsc::Receiver<sc_network::config::IncomingRequest>),
+}
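+
+// Sender sketch: once a `DisputeMessage` has been built from two checked statements
+// (see `DisputeMessage::from_signed_statements`), hand it to dispute distribution:
+//
+// ctx.send_message(DisputeDistributionMessage::SendDispute(message).into()).await;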
+
 /// Messages received by the network bridge subsystem.
 #[derive(Debug)]
 pub enum NetworkBridgeMessage {
diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs
index 882ecd122cb7466ead9075bd738fcac09c2bd1b9..c691a0010165c8751e67d7cfc823472ada374cc7 100644
--- a/polkadot/node/subsystem-util/src/runtime/mod.rs
+++ b/polkadot/node/subsystem-util/src/runtime/mod.rs
@@ -16,6 +16,8 @@
 
 //! Convenient interface to runtime information.
 
+use std::cmp::max;
+
 use lru::LruCache;
 
 use parity_scale_codec::Encode;
@@ -24,7 +26,7 @@ use sp_core::crypto::Public;
 use sp_keystore::{CryptoStore, SyncCryptoStorePtr};
 
 use polkadot_primitives::v1::{CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, OccupiedCore, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidatorId, ValidatorIndex};
-use polkadot_node_subsystem::SubsystemContext;
+use polkadot_node_subsystem::{SubsystemSender, SubsystemContext};
 
 
 use crate::{
@@ -39,6 +41,17 @@ mod error;
 use error::{recv_runtime, Result};
 pub use error::{Error, NonFatal, Fatal};
 
+/// Configuration for constructing a `RuntimeInfo`.
+pub struct Config {
+	/// Needed for retrieval of `ValidatorInfo`.
+	///
+	/// Pass `None` if you are not interested.
+	pub keystore: Option<SyncCryptoStorePtr>,
+
+	/// How many sessions should we keep in the cache?
+	pub session_cache_lru_size: usize,
+}
+
 /// Caching of session info.
 ///
 /// It should be ensured that a cached session stays live in the cache as long as we might need it.
@@ -74,32 +87,48 @@ pub struct ValidatorInfo {
 	pub our_group: Option<GroupIndex>,
 }
 
+impl Default for Config {
+	fn default() -> Self {
+		Self {
+			keystore: None,
+			// Usually we need to cache the current and the last session.
+			session_cache_lru_size: 2,
+		}
+	}
+}
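+
+// Construction sketch: a disputes-related subsystem might want to cache a full dispute
+// window of sessions (`DISPUTE_WINDOW` used here purely for illustration):
+//
+// let runtime = RuntimeInfo::new_with_config(Config {
+// 	keystore: Some(keystore),
+// 	session_cache_lru_size: DISPUTE_WINDOW as usize,
+// });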
+
 impl RuntimeInfo {
 	/// Create a new `RuntimeInfo` for convenient runtime fetches.
 	pub fn new(keystore: Option<SyncCryptoStorePtr>) -> Self {
-		Self {
-			// Adjust, depending on how many forks we want to support.
-			session_index_cache: LruCache::new(10),
-			// We need to cache the current and the last session the most:
-			session_info_cache: LruCache::new(2),
+		Self::new_with_config(Config {
 			keystore,
+			..Default::default()
+		})
+	}
+
+	/// Create with more elaborate configuration options.
+	pub fn new_with_config(cfg: Config) -> Self {
+		Self {
+			session_index_cache: LruCache::new(max(10, cfg.session_cache_lru_size)),
+			session_info_cache: LruCache::new(cfg.session_cache_lru_size),
+			keystore: cfg.keystore,
 		}
 	}
 
 	/// Retrieve the current session index.
-	pub async fn get_session_index<Context>(
+	pub async fn get_session_index<Sender>(
 		&mut self,
-		ctx: &mut Context,
+		sender: &mut Sender,
 		parent: Hash,
 	) -> Result<SessionIndex>
 	where
-		Context: SubsystemContext,
+		Sender: SubsystemSender,
 	{
 		match self.session_index_cache.get(&parent) {
 			Some(index) => Ok(*index),
 			None => {
 				let index =
-					recv_runtime(request_session_index_for_child(parent, ctx.sender()).await)
+					recv_runtime(request_session_index_for_child(parent, sender).await)
 						.await?;
 				self.session_index_cache.put(parent, index);
 				Ok(index)
@@ -108,31 +137,35 @@ impl RuntimeInfo {
 	}
 
 	/// Get `ExtendedSessionInfo` by relay parent hash.
-	pub async fn get_session_info<'a>(
+	pub async fn get_session_info<'a, Sender>(
 		&'a mut self,
-		ctx: &mut impl SubsystemContext,
+		sender: &mut Sender,
 		parent: Hash,
 	) -> Result<&'a ExtendedSessionInfo>
+	where
+		Sender: SubsystemSender,
 	{
-		let session_index = self.get_session_index(ctx, parent).await?;
+		let session_index = self.get_session_index(sender, parent).await?;
 
-		self.get_session_info_by_index(ctx, parent, session_index).await
+		self.get_session_info_by_index(sender, parent, session_index).await
 	}
 
 	/// Get `ExtendedSessionInfo` by session index.
 	///
 	/// `request_session_info` still requires the parent to be passed in, so we take the parent
 	/// in addition to the `SessionIndex`.
-	pub async fn get_session_info_by_index<'a>(
+	pub async fn get_session_info_by_index<'a, Sender>(
 		&'a mut self,
-		ctx: &mut impl SubsystemContext,
+		sender: &mut Sender,
 		parent: Hash,
 		session_index: SessionIndex,
 	) -> Result<&'a ExtendedSessionInfo>
+	where
+		Sender: SubsystemSender,
 	{
 		if !self.session_info_cache.contains(&session_index) {
 			let session_info =
-				recv_runtime(request_session_info(parent, session_index, ctx.sender()).await)
+				recv_runtime(request_session_info(parent, session_index, sender).await)
 					.await?
 					.ok_or(NonFatal::NoSuchSession(session_index))?;
 			let validator_info = self.get_validator_info(&session_info).await?;
@@ -151,19 +184,19 @@ impl RuntimeInfo {
 	}
 
 	/// Convenience function for checking the signature of something signed.
-	pub async fn check_signature<Context, Payload, RealPayload>(
+	pub async fn check_signature<Sender, Payload, RealPayload>(
 		&mut self,
-		ctx: &mut Context,
+		sender: &mut Sender,
 		parent: Hash,
 		signed: UncheckedSigned<Payload, RealPayload>,
 	) -> Result<std::result::Result<Signed<Payload, RealPayload>, UncheckedSigned<Payload, RealPayload>>>
 	where
-		Context: SubsystemContext,
+		Sender: SubsystemSender,
 		Payload: EncodeAs<RealPayload> + Clone,
 		RealPayload: Encode + Clone,
 	{
-		let session_index = self.get_session_index(ctx, parent).await?;
-		let info = self.get_session_info_by_index(ctx, parent, session_index).await?;
+		let session_index = self.get_session_index(sender, parent).await?;
+		let info = self.get_session_info_by_index(sender, parent, session_index).await?;
 		Ok(check_signature(session_index, &info.session_info, parent, signed))
 	}
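+
+	// With the `Sender`-based API, callers no longer need a full `SubsystemContext`;
+	// passing `ctx.sender()` suffices, e.g. (sketch):
+	//
+	// let index = runtime_info.get_session_index(ctx.sender(), relay_parent).await?;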