From 25974f207655d8215214b6e41af4431c52f63a69 Mon Sep 17 00:00:00 2001
From: Robert Klotzner <eskimor@users.noreply.github.com>
Date: Fri, 19 Nov 2021 18:08:21 +0100
Subject: [PATCH] Dispute spam protection (#4134)

* Mostly notes.

* Better error messages.

* Introduce Fatal/NonFatal + drop back channel participation

- Fatal/NonFatal - in order to make it easier to use utility functions.
- We drop the back channel in dispute participation as it won't be
needed any more.

* Better error messages.

* Utility function for receiving `CandidateEvent`s.

* Ordering module typechecks.

* cargo fmt

* Prepare spam slots module.

* Implement SpamSlots mechanism.

* Implement queues.

* cargo fmt

* Participation.

* Participation taking shape.

* Finish participation.

* cargo fmt

* Cleanup.

* WIP: Cleanup + Integration.

* Make `RollingSessionWindow` initialized by default.

* Make approval voting typecheck.

* Get rid of lazy_static & fix approval voting tests

* Move `SessionWindowSize` to node primitives.

* Implement dispute coordinator initialization.

* cargo fmt

* Make queues return error instead of boolean.

* Initialized: WIP

* Introduce chain api for getting finalized block.

* Fix ordering to only prune candidates on finalized events.

* Pruning of old sessions in spam slots.

* New import logic.

* Make everything typecheck.

* Fix warnings.

* Get rid of obsolete dispute-participation.

* Fixes.

* Add back accidentally deleted Cargo.lock

* Deliver disputes in an ordered fashion.

* Add module docs for errors

* Use type synonym.

* hidden docs.

* Fix overseer tests.

* Ordering provider taking `CandidateReceipt`.

... to be reworked in a follow-up commit.

* Fix ordering to use relay_parent

as the included block is not unique per candidate.

* Add comment in ordering.rs.

* Take care of duplicate entries in queues.

* Better spam slots.

* Review remarks + docs.

* Fix db tests.

* Participation tests.

* Also scrape votes on first leaf for good measure.

* Make tests typecheck.

* Spelling.

* Only participate in actual disputes, not on every import.

* Don't account backing votes to spam slots.

* Fix more tests.

* Don't participate if we don't have keys.

* Fix tests, typos and warnings.

* Fix merge error.

* Spelling fixes.

* Add missing docs.

* Queue tests.

* More tests.

* Add metrics + don't short circuit import.

* Basic test for ordering provider.

* Import fix.

* Remove dead link.

* One more dead link.

Co-authored-by: Lldenaurois <Ljdenaurois@gmail.com>
---
 polkadot/Cargo.lock                           |   19 +-
 polkadot/Cargo.toml                           |    1 -
 .../node/core/approval-voting/src/import.rs   |   36 +-
 polkadot/node/core/approval-voting/src/lib.rs |   35 +-
 .../core/dispute-coordinator/src/metrics.rs   |   24 +
 .../dispute-coordinator/src/real/backend.rs   |    7 +-
 .../dispute-coordinator/src/real/db/v1.rs     |   19 +-
 .../dispute-coordinator/src/real/error.rs     |  167 ++
 .../src/real/initialized.rs                   | 1124 +++++++++++++
 .../core/dispute-coordinator/src/real/mod.rs  | 1467 ++++-------------
 .../src/real/ordering/mod.rs                  |  219 +++
 .../src/real/ordering/tests.rs                |  165 ++
 .../src/real/participation/mod.rs             |  438 +++++
 .../src/real/participation/queues/mod.rs      |  210 +++
 .../src/real/participation/queues/tests.rs    |  131 ++
 .../src/real/participation/tests.rs           |  550 ++++++
 .../src/real/spam_slots.rs                    |  123 ++
 .../dispute-coordinator/src/real/status.rs    |  165 ++
 .../dispute-coordinator/src/real/tests.rs     |  329 ++--
 .../core/dispute-participation/Cargo.toml     |   20 -
 .../core/dispute-participation/src/lib.rs     |  372 -----
 .../core/dispute-participation/src/tests.rs   |  432 -----
 .../availability-distribution/src/error.rs    |   10 +-
 polkadot/node/network/bridge/src/tests.rs     |    2 -
 .../network/dispute-distribution/src/lib.rs   |    2 +-
 .../dispute-distribution/src/receiver/mod.rs  |    2 +-
 polkadot/node/overseer/src/dummy.rs           |    4 -
 polkadot/node/overseer/src/lib.rs             |    9 +-
 polkadot/node/overseer/src/tests.rs           |   20 +-
 polkadot/node/primitives/src/lib.rs           |   59 +-
 polkadot/node/service/Cargo.toml              |    2 -
 polkadot/node/service/src/overseer.rs         |    3 -
 polkadot/node/subsystem-types/src/messages.rs |   19 -
 polkadot/node/subsystem-util/Cargo.toml       |    2 +
 polkadot/node/subsystem-util/src/lib.rs       |    1 +
 .../src/rolling_session_window.rs             |  320 ++--
 .../node/subsystem-util/src/runtime/error.rs  |    4 +-
 .../node/subsystem-util/src/runtime/mod.rs    |   33 +-
 polkadot/primitives/src/v1/mod.rs             |   11 +
 .../roadmap/implementers-guide/src/SUMMARY.md |    1 -
 .../src/node/approval/README.md               |    2 +-
 .../src/node/disputes/dispute-coordinator.md  |    9 +-
 .../src/node/disputes/dispute-distribution.md |    7 +-
 .../node/disputes/dispute-participation.md    |   68 -
 .../src/types/overseer-protocol.md            |   25 -
 45 files changed, 4073 insertions(+), 2595 deletions(-)
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/error.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/initialized.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/ordering/mod.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/ordering/tests.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/participation/mod.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/participation/queues/mod.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/participation/queues/tests.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/participation/tests.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/spam_slots.rs
 create mode 100644 polkadot/node/core/dispute-coordinator/src/real/status.rs
 delete mode 100644 polkadot/node/core/dispute-participation/Cargo.toml
 delete mode 100644 polkadot/node/core/dispute-participation/src/lib.rs
 delete mode 100644 polkadot/node/core/dispute-participation/src/tests.rs
 delete mode 100644 polkadot/roadmap/implementers-guide/src/node/disputes/dispute-participation.md

diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock
index d55feffad2a..7ae689bb275 100644
--- a/polkadot/Cargo.lock
+++ b/polkadot/Cargo.lock
@@ -6368,22 +6368,6 @@ dependencies = [
  "tracing",
 ]
 
-[[package]]
-name = "polkadot-node-core-dispute-participation"
-version = "0.9.13"
-dependencies = [
- "assert_matches",
- "futures 0.3.17",
- "parity-scale-codec",
- "polkadot-node-primitives",
- "polkadot-node-subsystem",
- "polkadot-node-subsystem-test-helpers",
- "polkadot-primitives",
- "sp-core",
- "thiserror",
- "tracing",
-]
-
 [[package]]
 name = "polkadot-node-core-parachains-inherent"
 version = "0.9.13"
@@ -6591,6 +6575,7 @@ dependencies = [
  "env_logger 0.9.0",
  "futures 0.3.17",
  "itertools",
+ "lazy_static",
  "log",
  "lru 0.7.0",
  "metered-channel",
@@ -6599,6 +6584,7 @@ dependencies = [
  "polkadot-node-jaeger",
  "polkadot-node-metrics",
  "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
  "polkadot-node-subsystem",
  "polkadot-node-subsystem-test-helpers",
  "polkadot-overseer",
@@ -6955,7 +6941,6 @@ dependencies = [
  "polkadot-node-core-chain-api",
  "polkadot-node-core-chain-selection",
  "polkadot-node-core-dispute-coordinator",
- "polkadot-node-core-dispute-participation",
  "polkadot-node-core-parachains-inherent",
  "polkadot-node-core-provisioner",
  "polkadot-node-core-runtime-api",
diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml
index 823676c049b..3d5d5c1f12d 100644
--- a/polkadot/Cargo.toml
+++ b/polkadot/Cargo.toml
@@ -56,7 +56,6 @@ members = [
 	"node/core/chain-api",
 	"node/core/chain-selection",
 	"node/core/dispute-coordinator",
-	"node/core/dispute-participation",
 	"node/core/parachains-inherent",
 	"node/core/provisioner",
 	"node/core/pvf",
diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs
index 710c1ab55cf..111e96f1334 100644
--- a/polkadot/node/core/approval-voting/src/import.rs
+++ b/polkadot/node/core/approval-voting/src/import.rs
@@ -76,7 +76,7 @@ struct ImportedBlockInfo {
 }
 
 struct ImportedBlockInfoEnv<'a> {
-	session_window: &'a RollingSessionWindow,
+	session_window: &'a Option<RollingSessionWindow>,
 	assignment_criteria: &'a (dyn AssignmentCriteria + Send + Sync),
 	keystore: &'a LocalKeystore,
 }
@@ -133,7 +133,11 @@ async fn imported_block_info(
 			Err(_) => return Ok(None),
 		};
 
-		if env.session_window.earliest_session().map_or(true, |e| session_index < e) {
+		if env
+			.session_window
+			.as_ref()
+			.map_or(true, |s| session_index < s.earliest_session())
+		{
 			tracing::debug!(
 				target: LOG_TARGET,
 				"Block {} is from ancient session {}. Skipping",
@@ -180,7 +184,8 @@ async fn imported_block_info(
 		}
 	};
 
-	let session_info = match env.session_window.session_info(session_index) {
+	let session_info = match env.session_window.as_ref().and_then(|s| s.session_info(session_index))
+	{
 		Some(s) => s,
 		None => {
 			tracing::debug!(
@@ -324,7 +329,7 @@ pub(crate) async fn handle_new_head(
 		}
 	};
 
-	match state.session_window.cache_session_info_for_head(ctx, head).await {
+	match state.cache_session_info_for_head(ctx, head).await {
 		Err(e) => {
 			tracing::debug!(
 				target: LOG_TARGET,
@@ -335,7 +340,7 @@ pub(crate) async fn handle_new_head(
 
 			return Ok(Vec::new())
 		},
-		Ok(a @ SessionWindowUpdate::Advanced { .. }) => {
+		Ok(Some(a @ SessionWindowUpdate::Advanced { .. })) => {
 			tracing::info!(
 				target: LOG_TARGET,
 				update = ?a,
@@ -431,8 +436,9 @@ pub(crate) async fn handle_new_head(
 
 		let session_info = state
 			.session_window
-			.session_info(session_index)
-			.expect("imported_block_info requires session to be available; qed");
+			.as_ref()
+			.and_then(|s| s.session_info(session_index))
+			.expect("imported_block_info requires session info to be available; qed");
 
 		let (block_tick, no_show_duration) = {
 			let block_tick = slot_number_to_tick(state.slot_duration_millis, slot);
@@ -608,7 +614,7 @@ pub(crate) mod tests {
 
 	fn blank_state() -> State {
 		State {
-			session_window: RollingSessionWindow::new(APPROVAL_SESSIONS),
+			session_window: None,
 			keystore: Arc::new(LocalKeystore::in_memory()),
 			slot_duration_millis: 6_000,
 			clock: Box::new(MockClock::default()),
@@ -618,11 +624,11 @@ pub(crate) mod tests {
 
 	fn single_session_state(index: SessionIndex, info: SessionInfo) -> State {
 		State {
-			session_window: RollingSessionWindow::with_session_info(
+			session_window: Some(RollingSessionWindow::with_session_info(
 				APPROVAL_SESSIONS,
 				index,
 				vec![info],
-			),
+			)),
 			..blank_state()
 		}
 	}
@@ -740,7 +746,7 @@ pub(crate) mod tests {
 			let header = header.clone();
 			Box::pin(async move {
 				let env = ImportedBlockInfoEnv {
-					session_window: &session_window,
+					session_window: &Some(session_window),
 					assignment_criteria: &MockAssignmentCriteria,
 					keystore: &LocalKeystore::in_memory(),
 				};
@@ -849,7 +855,7 @@ pub(crate) mod tests {
 			let header = header.clone();
 			Box::pin(async move {
 				let env = ImportedBlockInfoEnv {
-					session_window: &session_window,
+					session_window: &Some(session_window),
 					assignment_criteria: &MockAssignmentCriteria,
 					keystore: &LocalKeystore::in_memory(),
 				};
@@ -942,7 +948,7 @@ pub(crate) mod tests {
 			.collect::<Vec<_>>();
 
 		let test_fut = {
-			let session_window = RollingSessionWindow::new(APPROVAL_SESSIONS);
+			let session_window = None;
 
 			let header = header.clone();
 			Box::pin(async move {
@@ -1037,11 +1043,11 @@ pub(crate) mod tests {
 				.map(|(r, c, g)| (r.hash(), r.clone(), *c, *g))
 				.collect::<Vec<_>>();
 
-			let session_window = RollingSessionWindow::with_session_info(
+			let session_window = Some(RollingSessionWindow::with_session_info(
 				APPROVAL_SESSIONS,
 				session,
 				vec![session_info],
-			);
+			));
 
 			let header = header.clone();
 			Box::pin(async move {
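As an aside to the hunks above — a minimal, standalone sketch (simplified, hypothetical types; not part of the patch) of how the new `Option`-based session window behaves in the ancient-session check: an absent window is treated the same as an ancient session, so blocks are skipped until the window has been initialized.

```rust
type SessionIndex = u32;

struct RollingSessionWindow {
	earliest_session: SessionIndex,
}

impl RollingSessionWindow {
	fn earliest_session(&self) -> SessionIndex {
		self.earliest_session
	}
}

// Mirrors `env.session_window.as_ref().map_or(true, |s| session_index < s.earliest_session())`.
fn is_ancient(window: &Option<RollingSessionWindow>, session_index: SessionIndex) -> bool {
	window.as_ref().map_or(true, |s| session_index < s.earliest_session())
}

fn main() {
	// No window yet -> every session counts as ancient and the block is skipped.
	assert!(is_ancient(&None, 7));
	let window = Some(RollingSessionWindow { earliest_session: 5 });
	assert!(is_ancient(&window, 4));
	assert!(!is_ancient(&window, 6));
}
```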
diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index 01edc67a335..b7ac1e35033 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -44,7 +44,10 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_util::{
 	metrics::{self, prometheus},
-	rolling_session_window::RollingSessionWindow,
+	rolling_session_window::{
+		new_session_window_size, RollingSessionWindow, SessionWindowSize, SessionWindowUpdate,
+		SessionsUnavailable,
+	},
 	TimeoutExt,
 };
 use polkadot_primitives::v1::{
@@ -92,7 +95,8 @@ use crate::{
 #[cfg(test)]
 mod tests;
 
-const APPROVAL_SESSIONS: SessionIndex = 6;
+pub const APPROVAL_SESSIONS: SessionWindowSize = new_session_window_size!(6);
+
 const APPROVAL_CHECKING_TIMEOUT: Duration = Duration::from_secs(120);
 const APPROVAL_CACHE_SIZE: usize = 1024;
 const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds.
@@ -568,7 +572,7 @@ impl CurrentlyCheckingSet {
 }
 
 struct State {
-	session_window: RollingSessionWindow,
+	session_window: Option<RollingSessionWindow>,
 	keystore: Arc<LocalKeystore>,
 	slot_duration_millis: u64,
 	clock: Box<dyn Clock + Send + Sync>,
@@ -577,9 +581,30 @@ struct State {
 
 impl State {
 	fn session_info(&self, i: SessionIndex) -> Option<&SessionInfo> {
-		self.session_window.session_info(i)
+		self.session_window.as_ref().and_then(|w| w.session_info(i))
 	}
 
+	/// Bring `session_window` up to date.
+	pub async fn cache_session_info_for_head(
+		&mut self,
+		ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
+		head: Hash,
+	) -> Result<Option<SessionWindowUpdate>, SessionsUnavailable> {
+		let session_window = self.session_window.take();
+		match session_window {
+			None => {
+				self.session_window =
+					Some(RollingSessionWindow::new(ctx, APPROVAL_SESSIONS, head).await?);
+				Ok(None)
+			},
+			Some(mut session_window) => {
+				let r =
+					session_window.cache_session_info_for_head(ctx, head).await.map(Option::Some);
+				self.session_window = Some(session_window);
+				r
+			},
+		}
+	}
+
 	// Compute the required tranches for approval for this block and candidate combo.
 	// Fails if there is no approval entry for the block under the candidate or no candidate entry
 	// under the block, or if the session is out of bounds.
@@ -671,7 +696,7 @@ where
 	B: Backend,
 {
 	let mut state = State {
-		session_window: RollingSessionWindow::new(APPROVAL_SESSIONS),
+		session_window: None,
 		keystore: subsystem.keystore,
 		slot_duration_millis: subsystem.slot_duration_millis,
 		clock,
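The `cache_session_info_for_head` method above uses a take-and-restore pattern so the `None` arm can construct a fresh window while the `Some` arm updates the existing one in place. A simplified, synchronous sketch of that shape (hypothetical `Window`/`State` types, not the real API):

```rust
struct Window(u32);

struct State {
	session_window: Option<Window>,
}

impl State {
	// Returns `None` on first initialization and `Some(update)` afterwards,
	// mirroring the `Result<Option<SessionWindowUpdate>, _>` shape above.
	fn cache_for_head(&mut self, head: u32) -> Option<u32> {
		match self.session_window.take() {
			None => {
				// First head seen: construct the window; no update to report.
				self.session_window = Some(Window(head));
				None
			},
			Some(mut window) => {
				// Later heads: advance the existing window and put it back.
				window.0 = window.0.max(head);
				let update = window.0;
				self.session_window = Some(window);
				Some(update)
			},
		}
	}
}

fn main() {
	let mut state = State { session_window: None };
	assert_eq!(state.cache_for_head(10), None);
	assert_eq!(state.cache_for_head(12), Some(12));
}
```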
diff --git a/polkadot/node/core/dispute-coordinator/src/metrics.rs b/polkadot/node/core/dispute-coordinator/src/metrics.rs
index b70ec09db97..df7cae08291 100644
--- a/polkadot/node/core/dispute-coordinator/src/metrics.rs
+++ b/polkadot/node/core/dispute-coordinator/src/metrics.rs
@@ -24,6 +24,8 @@ struct MetricsInner {
 	votes: prometheus::CounterVec<prometheus::U64>,
 	/// Conclusion across all disputes.
 	concluded: prometheus::CounterVec<prometheus::U64>,
+	/// Number of participations that have been queued.
+	queued_participations: prometheus::CounterVec<prometheus::U64>,
 }
 
 /// Candidate validation metrics.
@@ -61,6 +63,18 @@ impl Metrics {
 			metrics.concluded.with_label_values(&["invalid"]).inc();
 		}
 	}
+
+	pub(crate) fn on_queued_priority_participation(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.queued_participations.with_label_values(&["priority"]).inc();
+		}
+	}
+
+	pub(crate) fn on_queued_best_effort_participation(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.queued_participations.with_label_values(&["best-effort"]).inc();
+		}
+	}
 }
 
 impl metrics::Metrics for Metrics {
@@ -93,6 +107,16 @@ impl metrics::Metrics for Metrics {
 				)?,
 				registry,
 			)?,
+			queued_participations: prometheus::register(
+				prometheus::CounterVec::new(
+					prometheus::Opts::new(
+						"parachain_dispute_participations",
+						"Total number of queued participations, grouped by priority and best-effort. (Not every queueing will necessarily lead to an actual participation because of duplicates.)",
+					),
+					&["priority"],
+				)?,
+				registry,
+			)?,
 		};
 		Ok(Metrics(Some(metrics)))
 	}
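The new counter uses a single label axis to split queued participations into the two queues. A standalone sketch of that pattern using the plain `prometheus` crate (names mirror the hunk above; this is an illustration, not the subsystem code):

```rust
use prometheus::{CounterVec, Opts, Registry};

fn main() -> prometheus::Result<()> {
	let registry = Registry::new();
	// One metric, one label axis, two label values.
	let queued = CounterVec::new(
		Opts::new("parachain_dispute_participations", "Total number of queued participations."),
		&["priority"],
	)?;
	registry.register(Box::new(queued.clone()))?;

	// Corresponds to `on_queued_priority_participation` /
	// `on_queued_best_effort_participation` above:
	queued.with_label_values(&["priority"]).inc();
	queued.with_label_values(&["best-effort"]).inc();
	Ok(())
}
```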
diff --git a/polkadot/node/core/dispute-coordinator/src/real/backend.rs b/polkadot/node/core/dispute-coordinator/src/real/backend.rs
index 75a9caa7599..71d2dd35ccb 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/backend.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/backend.rs
@@ -26,7 +26,10 @@ use polkadot_primitives::v1::{CandidateHash, SessionIndex};
 
 use std::collections::HashMap;
 
-use super::db::v1::{CandidateVotes, RecentDisputes};
+use super::{
+	db::v1::{CandidateVotes, RecentDisputes},
+	error::FatalResult,
+};
 
 #[derive(Debug)]
 pub enum BackendWriteOp {
@@ -53,7 +56,7 @@ pub trait Backend {
 
 	/// Atomically writes the list of operations, with later operations taking precedence over
 	/// prior.
-	fn write<I>(&mut self, ops: I) -> SubsystemResult<()>
+	fn write<I>(&mut self, ops: I) -> FatalResult<()>
 	where
 		I: IntoIterator<Item = BackendWriteOp>;
 }
diff --git a/polkadot/node/core/dispute-coordinator/src/real/db/v1.rs b/polkadot/node/core/dispute-coordinator/src/real/db/v1.rs
index 50835a0a542..63d06d9101b 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/db/v1.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/db/v1.rs
@@ -27,12 +27,11 @@ use std::sync::Arc;
 use kvdb::{DBTransaction, KeyValueDB};
 use parity_scale_codec::{Decode, Encode};
 
-use crate::{
-	real::{
-		backend::{Backend, BackendWriteOp, OverlayedBackend},
-		DISPUTE_WINDOW,
-	},
-	DisputeStatus,
+use crate::real::{
+	backend::{Backend, BackendWriteOp, OverlayedBackend},
+	error::{Fatal, FatalResult},
+	status::DisputeStatus,
+	DISPUTE_WINDOW,
 };
 
 const RECENT_DISPUTES_KEY: &[u8; 15] = b"recent-disputes";
@@ -72,7 +71,7 @@ impl Backend for DbBackend {
 
 	/// Atomically writes the list of operations, with later operations taking precedence over
 	/// prior.
-	fn write<I>(&mut self, ops: I) -> SubsystemResult<()>
+	fn write<I>(&mut self, ops: I) -> FatalResult<()>
 	where
 		I: IntoIterator<Item = BackendWriteOp>,
 	{
@@ -98,7 +97,7 @@ impl Backend for DbBackend {
 			}
 		}
 
-		self.inner.write(tx).map_err(Into::into)
+		self.inner.write(tx).map_err(Fatal::DbWriteFailed)
 	}
 }
 
@@ -214,7 +213,7 @@ pub(crate) fn note_current_session(
 	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
 	current_session: SessionIndex,
 ) -> SubsystemResult<()> {
-	let new_earliest = current_session.saturating_sub(DISPUTE_WINDOW);
+	let new_earliest = current_session.saturating_sub(DISPUTE_WINDOW.get());
 	match overlay_db.load_earliest_session()? {
 		None => {
 			// First launch - write new-earliest.
@@ -421,7 +420,7 @@ mod tests {
 
 		let prev_earliest_session = 0;
 		let new_earliest_session = 5;
-		let current_session = 5 + DISPUTE_WINDOW;
+		let current_session = 5 + DISPUTE_WINDOW.get();
 
 		let very_old = 3;
 		let slightly_old = 4;
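A worked example of the pruning arithmetic above (standalone sketch; assumes the window size of 6 sessions used by `DISPUTE_WINDOW` in this patch, with `NonZeroU32` standing in for `SessionWindowSize`):

```rust
use std::num::NonZeroU32;

fn main() {
	let dispute_window = NonZeroU32::new(6).unwrap();
	let current_session: u32 = 5 + dispute_window.get(); // 11, as in the test above
	let new_earliest = current_session.saturating_sub(dispute_window.get());
	assert_eq!(new_earliest, 5);
	// Early sessions saturate at 0 instead of underflowing:
	assert_eq!(2u32.saturating_sub(dispute_window.get()), 0);
}
```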
diff --git a/polkadot/node/core/dispute-coordinator/src/real/error.rs b/polkadot/node/core/dispute-coordinator/src/real/error.rs
new file mode 100644
index 00000000000..86124bc5522
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/error.rs
@@ -0,0 +1,167 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use futures::channel::oneshot;
+use thiserror::Error;
+
+use polkadot_node_subsystem::{
+	errors::{ChainApiError, RuntimeApiError},
+	SubsystemError,
+};
+use polkadot_node_subsystem_util::{rolling_session_window::SessionsUnavailable, runtime};
+
+use super::{db, participation};
+use crate::real::{CodecError, LOG_TARGET};
+
+/// Errors for this subsystem.
+#[derive(Debug, Error)]
+#[error(transparent)]
+pub enum Error {
+	/// All fatal errors.
+	Fatal(#[from] Fatal),
+	/// All nonfatal/potentially recoverable errors.
+	NonFatal(#[from] NonFatal),
+}
+
+/// General `Result` type for dispute coordinator.
+pub type Result<R> = std::result::Result<R, Error>;
+/// Result type with only fatal errors.
+pub type FatalResult<R> = std::result::Result<R, Fatal>;
+/// Result type with only non fatal errors.
+pub type NonFatalResult<R> = std::result::Result<R, NonFatal>;
+
+impl From<runtime::Error> for Error {
+	fn from(o: runtime::Error) -> Self {
+		match o {
+			runtime::Error::Fatal(f) => Self::Fatal(Fatal::Runtime(f)),
+			runtime::Error::NonFatal(f) => Self::NonFatal(NonFatal::Runtime(f)),
+		}
+	}
+}
+
+impl From<SubsystemError> for Error {
+	fn from(o: SubsystemError) -> Self {
+		match o {
+			SubsystemError::Context(msg) => Self::Fatal(Fatal::SubsystemContext(msg)),
+			_ => Self::NonFatal(NonFatal::Subsystem(o)),
+		}
+	}
+}
+
+/// Fatal errors of this subsystem.
+#[derive(Debug, Error)]
+pub enum Fatal {
+	/// Errors coming from runtime::Runtime.
+	#[error("Error while accessing runtime information {0}")]
+	Runtime(#[from] runtime::Fatal),
+
+	/// We received a legacy `SubsystemError::Context` error which is considered fatal.
+	#[error("SubsystemError::Context error: {0}")]
+	SubsystemContext(String),
+
+	/// `ctx.spawn` failed with an error.
+	#[error("Spawning a task failed: {0}")]
+	SpawnFailed(SubsystemError),
+
+	#[error("Participation worker receiver exhausted.")]
+	ParticipationWorkerReceiverExhausted,
+
+	/// Receiving subsystem message from overseer failed.
+	#[error("Receiving message from overseer failed: {0}")]
+	SubsystemReceive(#[source] SubsystemError),
+
+	#[error("Writing to database failed: {0}")]
+	DbWriteFailed(std::io::Error),
+
+	#[error("Oneshow for receiving block number from chain API got cancelled")]
+	CanceledBlockNumber,
+
+	#[error("Retrieving block number from chain API failed with error: {0}")]
+	ChainApiBlockNumber(ChainApiError),
+}
+
+#[derive(Debug, thiserror::Error)]
+#[allow(missing_docs)]
+pub enum NonFatal {
+	#[error(transparent)]
+	RuntimeApi(#[from] RuntimeApiError),
+
+	#[error(transparent)]
+	ChainApi(#[from] ChainApiError),
+
+	#[error(transparent)]
+	Io(#[from] std::io::Error),
+
+	#[error(transparent)]
+	Oneshot(#[from] oneshot::Canceled),
+
+	#[error("Dispute import confirmation send failed (receiver canceled)")]
+	DisputeImportOneshotSend,
+
+	#[error(transparent)]
+	Subsystem(SubsystemError),
+
+	#[error(transparent)]
+	Codec(#[from] CodecError),
+
+	/// `RollingSessionWindow` was not able to retrieve `SessionInfo`s.
+	#[error("Sessions unavailable in `RollingSessionWindow`: {0}")]
+	RollingSessionWindow(#[from] SessionsUnavailable),
+
+	/// Errors coming from runtime::Runtime.
+	#[error("Error while accessing runtime information: {0}")]
+	Runtime(#[from] runtime::NonFatal),
+
+	#[error(transparent)]
+	QueueError(#[from] participation::QueueError),
+}
+
+impl From<db::v1::Error> for Error {
+	fn from(err: db::v1::Error) -> Self {
+		match err {
+			db::v1::Error::Io(io) => Self::NonFatal(NonFatal::Io(io)),
+			db::v1::Error::Codec(e) => Self::NonFatal(NonFatal::Codec(e)),
+		}
+	}
+}
+
+/// Utility for consuming top-level errors and logging them.
+///
+/// We basically always want to try and continue on error. This utility function is meant to
+/// consume top-level errors by simply logging them.
+pub fn log_error(result: Result<()>) -> std::result::Result<(), Fatal> {
+	match result {
+		Err(Error::Fatal(f)) => Err(f),
+		Err(Error::NonFatal(error)) => {
+			error.log();
+			Ok(())
+		},
+		Ok(()) => Ok(()),
+	}
+}
+
+impl NonFatal {
+	/// Log a `NonFatal`.
+	pub fn log(self) {
+		match self {
+			// don't spam the log with spurious errors
+			Self::RuntimeApi(_) | Self::Oneshot(_) =>
+				tracing::debug!(target: LOG_TARGET, error = ?self),
+			// it's worth reporting otherwise
+			_ => tracing::warn!(target: LOG_TARGET, error = ?self),
+		}
+	}
+}
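The `log_error` helper is designed to sit in a restart loop: non-fatal errors are logged and the loop continues, while fatal errors propagate and terminate the subsystem. A minimal sketch of that shape with stand-in error types (the real loop is `Initialized::run` later in this patch):

```rust
#[derive(Debug)]
enum Fatal { Shutdown }
#[derive(Debug)]
enum NonFatal { Spurious }
#[derive(Debug)]
enum Error { Fatal(Fatal), NonFatal(NonFatal) }

fn log_error(result: Result<(), Error>) -> Result<(), Fatal> {
	match result {
		Err(Error::Fatal(f)) => Err(f),
		Err(Error::NonFatal(e)) => {
			// Would be `tracing` in the real code.
			eprintln!("non-fatal, continuing: {:?}", e);
			Ok(())
		},
		Ok(()) => Ok(()),
	}
}

fn run() -> Result<(), Fatal> {
	loop {
		let res: Result<(), Error> = Err(Error::NonFatal(NonFatal::Spurious));
		// Non-fatal errors are swallowed here; fatal ones exit via `?`.
		log_error(res)?;
		break Ok(())
	}
}

fn main() {
	assert!(run().is_ok());
}
```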
diff --git a/polkadot/node/core/dispute-coordinator/src/real/initialized.rs b/polkadot/node/core/dispute-coordinator/src/real/initialized.rs
new file mode 100644
index 00000000000..59d9d4f87bd
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/initialized.rs
@@ -0,0 +1,1124 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Dispute coordinator subsystem in initialized state (after first active leaf is received).
+
+use std::{collections::HashSet, sync::Arc};
+
+use futures::{
+	channel::{mpsc, oneshot},
+	FutureExt, StreamExt,
+};
+
+use sc_keystore::LocalKeystore;
+
+use polkadot_node_primitives::{
+	CandidateVotes, DisputeMessage, DisputeMessageCheckError, SignedDisputeStatement,
+	DISPUTE_WINDOW,
+};
+use polkadot_node_subsystem::{
+	messages::{
+		BlockDescription, DisputeCoordinatorMessage, DisputeDistributionMessage,
+		ImportStatementsResult, RuntimeApiMessage, RuntimeApiRequest,
+	},
+	overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SubsystemContext,
+};
+use polkadot_node_subsystem_util::rolling_session_window::{
+	RollingSessionWindow, SessionWindowUpdate,
+};
+use polkadot_primitives::v1::{
+	byzantine_threshold, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement,
+	DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, SessionInfo,
+	ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature,
+};
+
+use crate::{metrics::Metrics, DisputeCoordinatorSubsystem};
+
+use super::{
+	backend::Backend,
+	db,
+	error::{log_error, Fatal, FatalResult, NonFatal, NonFatalResult, Result},
+	ordering::{CandidateComparator, OrderingProvider},
+	participation::{
+		self, Participation, ParticipationRequest, ParticipationStatement, WorkerMessageReceiver,
+	},
+	spam_slots::SpamSlots,
+	status::{get_active_with_status, Clock, DisputeStatus, Timestamp},
+	OverlayedBackend, LOG_TARGET,
+};
+
+/// After the first active leaves update we transition to `Initialized` state.
+///
+/// Before the first active leaves update we can't really do much. We cannot check incoming
+/// statements for validity, we cannot query orderings, we have no valid `RollingSessionWindow`,
+/// ...
+pub struct Initialized {
+	keystore: Arc<LocalKeystore>,
+	rolling_session_window: RollingSessionWindow,
+	highest_session: SessionIndex,
+	spam_slots: SpamSlots,
+	participation: Participation,
+	ordering_provider: OrderingProvider,
+	participation_receiver: WorkerMessageReceiver,
+	metrics: Metrics,
+}
+
+impl Initialized {
+	/// Make initialized subsystem, ready to `run`.
+	pub fn new(
+		subsystem: DisputeCoordinatorSubsystem,
+		rolling_session_window: RollingSessionWindow,
+		spam_slots: SpamSlots,
+		ordering_provider: OrderingProvider,
+	) -> Self {
+		let DisputeCoordinatorSubsystem { config: _, store: _, keystore, metrics } = subsystem;
+
+		let (participation_sender, participation_receiver) = mpsc::channel(1);
+		let participation = Participation::new(participation_sender);
+		let highest_session = rolling_session_window.latest_session();
+
+		Self {
+			keystore,
+			rolling_session_window,
+			highest_session,
+			spam_slots,
+			ordering_provider,
+			participation,
+			participation_receiver,
+			metrics,
+		}
+	}
+
+	/// Run the initialized subsystem.
+	///
+	/// Optionally supply initial participations and a first leaf to process.
+	pub async fn run<B, Context>(
+		mut self,
+		mut ctx: Context,
+		mut backend: B,
+		mut participations: Vec<(Option<CandidateComparator>, ParticipationRequest)>,
+		mut first_leaf: Option<ActivatedLeaf>,
+		clock: Box<dyn Clock>,
+	) -> FatalResult<()>
+	where
+		Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+		Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
+		B: Backend,
+	{
+		loop {
+			let res = self
+				.run_until_error(
+					&mut ctx,
+					&mut backend,
+					&mut participations,
+					&mut first_leaf,
+					&*clock,
+				)
+				.await;
+			if let Ok(()) = res {
+				tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
+				return Ok(())
+			}
+			log_error(res)?;
+		}
+	}
+
+	// Run the subsystem until an error is encountered or a `Conclude` signal is received.
+	//
+	// A return value of `Ok` indicates that an exit should be made, while non-fatal errors
+	// lead to another call to this function.
+	async fn run_until_error<B, Context>(
+		&mut self,
+		ctx: &mut Context,
+		backend: &mut B,
+		participations: &mut Vec<(Option<CandidateComparator>, ParticipationRequest)>,
+		first_leaf: &mut Option<ActivatedLeaf>,
+		clock: &dyn Clock,
+	) -> Result<()>
+	where
+		Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+		Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
+		B: Backend,
+	{
+		for (comparator, request) in participations.drain(..) {
+			self.participation.queue_participation(ctx, comparator, request).await?;
+		}
+		if let Some(first_leaf) = first_leaf.take() {
+			let mut overlay_db = OverlayedBackend::new(backend);
+			self.scrape_on_chain_votes(ctx, &mut overlay_db, first_leaf.hash, clock.now())
+				.await?;
+			if !overlay_db.is_empty() {
+				let ops = overlay_db.into_write_ops();
+				backend.write(ops)?;
+			}
+			// Also provide first leaf to participation for good measure.
+			self.participation
+				.process_active_leaves_update(ctx, &ActiveLeavesUpdate::start_work(first_leaf))
+				.await?;
+		}
+
+		loop {
+			let mut overlay_db = OverlayedBackend::new(backend);
+			let default_confirm = Box::new(|| Ok(()));
+			let confirm_write =
+				match MuxedMessage::receive(ctx, &mut self.participation_receiver).await? {
+					MuxedMessage::Participation(msg) => {
+						let ParticipationStatement {
+							session,
+							candidate_hash,
+							candidate_receipt,
+							outcome,
+						} = self.participation.get_participation_result(ctx, msg).await?;
+						if let Some(valid) = outcome.validity() {
+							self.issue_local_statement(
+								ctx,
+								&mut overlay_db,
+								candidate_hash,
+								candidate_receipt,
+								session,
+								valid,
+								clock.now(),
+							)
+							.await?;
+						}
+						default_confirm
+					},
+					MuxedMessage::Subsystem(msg) => match msg {
+						FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
+						FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
+							self.process_active_leaves_update(
+								ctx,
+								&mut overlay_db,
+								update,
+								clock.now(),
+							)
+							.await?;
+							default_confirm
+						},
+						FromOverseer::Signal(OverseerSignal::BlockFinalized(_, n)) => {
+							self.ordering_provider.process_finalized_block(&n);
+							default_confirm
+						},
+						FromOverseer::Communication { msg } =>
+							self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?,
+					},
+				};
+
+			if !overlay_db.is_empty() {
+				let ops = overlay_db.into_write_ops();
+				backend.write(ops)?;
+				confirm_write()?;
+			}
+		}
+	}
+
+	async fn process_active_leaves_update(
+		&mut self,
+		ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage>
+		          + overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
+		overlay_db: &mut OverlayedBackend<'_, impl Backend>,
+		update: ActiveLeavesUpdate,
+		now: u64,
+	) -> Result<()> {
+		self.ordering_provider
+			.process_active_leaves_update(ctx.sender(), &update)
+			.await?;
+		self.participation.process_active_leaves_update(ctx, &update).await?;
+
+		let new_activations = update.activated.into_iter().map(|a| a.hash);
+		for new_leaf in new_activations {
+			match self.rolling_session_window.cache_session_info_for_head(ctx, new_leaf).await {
+				Err(e) => {
+					tracing::warn!(
+						target: LOG_TARGET,
+						err = ?e,
+						"Failed to update session cache for disputes",
+					);
+					continue
+				},
+				Ok(SessionWindowUpdate::Advanced {
+					new_window_end: window_end,
+					new_window_start,
+					..
+				}) => {
+					let session = window_end;
+					if self.highest_session < session {
+						tracing::trace!(
+							target: LOG_TARGET,
+							session,
+							"Observed new session. Pruning"
+						);
+
+						self.highest_session = session;
+
+						db::v1::note_current_session(overlay_db, session)?;
+						self.spam_slots.prune_old(new_window_start);
+					}
+				},
+				Ok(SessionWindowUpdate::Unchanged) => {},
+			};
+			self.scrape_on_chain_votes(ctx, overlay_db, new_leaf, now).await?;
+		}
+
+		Ok(())
+	}
+
+	/// Scrapes on-chain votes (backing votes and concluded disputes) for an active leaf of the
+	/// relay chain.
+	async fn scrape_on_chain_votes(
+		&mut self,
+		ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage>
+		          + overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
+		overlay_db: &mut OverlayedBackend<'_, impl Backend>,
+		new_leaf: Hash,
+		now: u64,
+	) -> Result<()> {
+		// obtain the concluded disputes as well as the candidate backing votes
+		// from the new leaf
+		let ScrapedOnChainVotes { session, backing_validators_per_candidate, disputes } = {
+			let (tx, rx) = oneshot::channel();
+			ctx.send_message(RuntimeApiMessage::Request(
+				new_leaf,
+				RuntimeApiRequest::FetchOnChainVotes(tx),
+			))
+			.await;
+			match rx.await {
+				Ok(Ok(Some(val))) => val,
+				Ok(Ok(None)) => {
+					tracing::trace!(
+						target: LOG_TARGET,
+						relay_parent = ?new_leaf,
+						"No on chain votes stored for relay chain leaf");
+					return Ok(())
+				},
+				Ok(Err(e)) => {
+					tracing::debug!(
+						target: LOG_TARGET,
+						relay_parent = ?new_leaf,
+						error = ?e,
+						"Could not retrieve on chain votes due to an API error");
+					return Ok(())
+				},
+				Err(e) => {
+					tracing::debug!(
+						target: LOG_TARGET,
+						relay_parent = ?new_leaf,
+						error = ?e,
+						"Could not retrieve onchain votes due to oneshot cancellation");
+					return Ok(())
+				},
+			}
+		};
+
+		if backing_validators_per_candidate.is_empty() && disputes.is_empty() {
+			return Ok(())
+		}
+
+		// Obtain the session info from the rolling session window, for the
+		// sake of `ValidatorId`s. Must be called _after_
+		// `cache_session_info_for_head`, which guarantees that the session
+		// info is available for the current session.
+		let session_info: SessionInfo =
+			if let Some(session_info) = self.rolling_session_window.session_info(session) {
+				session_info.clone()
+			} else {
+				tracing::warn!(
+					target: LOG_TARGET,
+					relay_parent = ?new_leaf,
+					"Could not retrieve session info from rolling session window");
+				return Ok(())
+			};
+
+		// Import the scraped on-chain backing votes for the candidates included in
+		// the new active leaf, as if we had received them via gossip.
+		for (candidate_receipt, backers) in backing_validators_per_candidate {
+			let candidate_hash = candidate_receipt.hash();
+			let statements = backers
+				.into_iter()
+				.filter_map(|(validator_index, attestation)| {
+					let validator_public: ValidatorId = session_info
+						.validators
+						.get(validator_index.0 as usize)
+						.or_else(|| {
+							tracing::error!(
+								target: LOG_TARGET,
+								relay_parent = ?new_leaf,
+								"Missing public key for validator {:?}",
+								&validator_index,
+							);
+							None
+						})
+						.cloned()?;
+					let validator_signature = attestation.signature().clone();
+					let valid_statement_kind =
+						match attestation.to_compact_statement(candidate_hash) {
+							CompactStatement::Seconded(_) =>
+								ValidDisputeStatementKind::BackingSeconded(new_leaf),
+							CompactStatement::Valid(_) =>
+								ValidDisputeStatementKind::BackingValid(new_leaf),
+						};
+					let signed_dispute_statement =
+						SignedDisputeStatement::new_unchecked_from_trusted_source(
+							DisputeStatement::Valid(valid_statement_kind),
+							candidate_hash,
+							session,
+							validator_public,
+							validator_signature,
+						);
+					Some((signed_dispute_statement, validator_index))
+				})
+				.collect();
+
+			let import_result = self
+				.handle_import_statements(
+					ctx,
+					overlay_db,
+					candidate_hash,
+					MaybeCandidateReceipt::Provides(candidate_receipt),
+					session,
+					statements,
+					now,
+				)
+				.await?;
+			match import_result {
+				ImportStatementsResult::ValidImport => tracing::trace!(
+					target: LOG_TARGET,
+					relay_parent = ?new_leaf,
+					?session,
+					"Imported backing vote from on-chain",
+				),
+				ImportStatementsResult::InvalidImport => tracing::warn!(
+					target: LOG_TARGET,
+					relay_parent = ?new_leaf,
+					?session,
+					"Attempted import of on-chain backing votes failed",
+				),
+			}
+		}
+
+		if disputes.is_empty() {
+			return Ok(())
+		}
+
+		// Import concluded disputes from on-chain; these already went through a vote, so they
+		// are assumed to be verified. They will only be stored; gossiping them is not necessary.
+
+		// First try to obtain all the backings, which ultimately contain the candidate
+		// receipt that we need.
+
+		for DisputeStatementSet { candidate_hash, session, statements } in disputes {
+			let statements = statements
+				.into_iter()
+				.filter_map(|(dispute_statement, validator_index, validator_signature)| {
+					let session_info: SessionInfo = if let Some(session_info) =
+						self.rolling_session_window.session_info(session)
+					{
+						session_info.clone()
+					} else {
+						tracing::warn!(
+							target: LOG_TARGET,
+							relay_parent = ?new_leaf,
+							?session,
+							"Could not retrieve session info from rolling session window for recently concluded dispute",
+						);
+						return None
+					};
+
+					let validator_public: ValidatorId = session_info
+						.validators
+						.get(validator_index.0 as usize)
+						.or_else(|| {
+							tracing::error!(
+								target: LOG_TARGET,
+								relay_parent = ?new_leaf,
+								?session,
+								"Missing public key for validator {:?} that participated in concluded dispute",
+								&validator_index);
+							None
+						})
+						.cloned()?;
+
+					Some((
+						SignedDisputeStatement::new_unchecked_from_trusted_source(
+							dispute_statement,
+							candidate_hash,
+							session,
+							validator_public,
+							validator_signature,
+						),
+						validator_index,
+					))
+				})
+				.collect::<Vec<_>>();
+			let import_result = self
+				.handle_import_statements(
+					ctx,
+					overlay_db,
+					candidate_hash,
+					// TODO <https://github.com/paritytech/polkadot/issues/4011>
+					MaybeCandidateReceipt::AssumeBackingVotePresent,
+					session,
+					statements,
+					now,
+				)
+				.await?;
+			match import_result {
+				ImportStatementsResult::ValidImport => tracing::trace!(
+					target: LOG_TARGET,
+					relay_parent = ?new_leaf,
+					?candidate_hash,
+					?session,
+					"Imported statement of concluded dispute from on-chain",
+				),
+				ImportStatementsResult::InvalidImport => tracing::warn!(
+					target: LOG_TARGET,
+					relay_parent = ?new_leaf,
+					?candidate_hash,
+					?session,
+					"Attempted import of on-chain statement of concluded dispute failed",
+				),
+			}
+		}
+		Ok(())
+	}
+
+	async fn handle_incoming(
+		&mut self,
+		ctx: &mut impl SubsystemContext,
+		overlay_db: &mut OverlayedBackend<'_, impl Backend>,
+		message: DisputeCoordinatorMessage,
+		now: Timestamp,
+	) -> Result<Box<dyn FnOnce() -> NonFatalResult<()>>> {
+		match message {
+			DisputeCoordinatorMessage::ImportStatements {
+				candidate_hash,
+				candidate_receipt,
+				session,
+				statements,
+				pending_confirmation,
+			} => {
+				let outcome = self
+					.handle_import_statements(
+						ctx,
+						overlay_db,
+						candidate_hash,
+						MaybeCandidateReceipt::Provides(candidate_receipt),
+						session,
+						statements,
+						now,
+					)
+					.await?;
+				let report = move || {
+					pending_confirmation
+						.send(outcome)
+						.map_err(|_| NonFatal::DisputeImportOneshotSend)
+				};
+				match outcome {
+					ImportStatementsResult::InvalidImport => {
+						report()?;
+					},
+					// In case of valid import, delay confirmation until actual disk write:
+					ImportStatementsResult::ValidImport => return Ok(Box::new(report)),
+				}
+			},
+			DisputeCoordinatorMessage::RecentDisputes(tx) => {
+				let recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default();
+				let _ = tx.send(recent_disputes.keys().cloned().collect());
+			},
+			DisputeCoordinatorMessage::ActiveDisputes(tx) => {
+				let recent_disputes =
+					overlay_db.load_recent_disputes()?.unwrap_or_default().into_iter();
+				let _ =
+					tx.send(get_active_with_status(recent_disputes, now).map(|(k, _)| k).collect());
+			},
+			DisputeCoordinatorMessage::QueryCandidateVotes(query, tx) => {
+				let mut query_output = Vec::new();
+				for (session_index, candidate_hash) in query.into_iter() {
+					if let Some(v) =
+						overlay_db.load_candidate_votes(session_index, &candidate_hash)?
+					{
+						query_output.push((session_index, candidate_hash, v.into()));
+					} else {
+						tracing::debug!(
+							target: LOG_TARGET,
+							session_index,
+							"No votes found for candidate",
+						);
+					}
+				}
+				let _ = tx.send(query_output);
+			},
+			DisputeCoordinatorMessage::IssueLocalStatement(
+				session,
+				candidate_hash,
+				candidate_receipt,
+				valid,
+			) => {
+				self.issue_local_statement(
+					ctx,
+					overlay_db,
+					candidate_hash,
+					candidate_receipt,
+					session,
+					valid,
+					now,
+				)
+				.await?;
+			},
+			DisputeCoordinatorMessage::DetermineUndisputedChain {
+				base: (base_number, base_hash),
+				block_descriptions,
+				tx,
+			} => {
+				let undisputed_chain = determine_undisputed_chain(
+					overlay_db,
+					base_number,
+					base_hash,
+					block_descriptions,
+				)?;
+
+				let _ = tx.send(undisputed_chain);
+			},
+		}
+
+		Ok(Box::new(|| Ok(())))
+	}
+
+	async fn handle_import_statements(
+		&mut self,
+		ctx: &mut impl SubsystemContext,
+		overlay_db: &mut OverlayedBackend<'_, impl Backend>,
+		candidate_hash: CandidateHash,
+		candidate_receipt: MaybeCandidateReceipt,
+		session: SessionIndex,
+		statements: Vec<(SignedDisputeStatement, ValidatorIndex)>,
+		now: Timestamp,
+	) -> Result<ImportStatementsResult> {
+		if session + DISPUTE_WINDOW.get() < self.highest_session {
+			// It is not valid to participate in an ancient dispute (spam?).
+			return Ok(ImportStatementsResult::InvalidImport)
+		}
+
+		let session_info = match self.rolling_session_window.session_info(session) {
+			None => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					session,
+					"Importing statement lacks info for session which has an active dispute",
+				);
+
+				return Ok(ImportStatementsResult::InvalidImport)
+			},
+			Some(info) => info,
+		};
+		let validators = session_info.validators.clone();
+
+		let n_validators = validators.len();
+
+		let supermajority_threshold =
+			polkadot_primitives::v1::supermajority_threshold(n_validators);
+
+		// In case we are not provided with a candidate receipt, we operate under
+		// the assumption that a previous vote which included a `CandidateReceipt`
+		// was seen. This holds since every block is preceded by the `Backing` phase.
+		//
+		// There is one exception: a sufficiently sophisticated attacker could prevent
+		// us from seeing the backing votes by withholding arbitrary blocks, in which
+		// case we do not have a `CandidateReceipt` available.
+		let mut votes = match overlay_db
+			.load_candidate_votes(session, &candidate_hash)?
+			.map(CandidateVotes::from)
+		{
+			Some(votes) => votes,
+			None =>
+				if let MaybeCandidateReceipt::Provides(candidate_receipt) = candidate_receipt {
+					CandidateVotes { candidate_receipt, valid: Vec::new(), invalid: Vec::new() }
+				} else {
+					tracing::warn!(
+						target: LOG_TARGET,
+						session,
+						"Not seen backing vote for candidate which has an active dispute",
+					);
+					return Ok(ImportStatementsResult::InvalidImport)
+				},
+		};
+		let candidate_receipt = votes.candidate_receipt.clone();
+		let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default();
+		let controlled_indices = find_controlled_validator_indices(&self.keystore, &validators);
+
+		// Whether we already cast a vote in that dispute:
+		let voted_already = {
+			let mut our_votes = votes.voted_indices();
+			our_votes.retain(|index| controlled_indices.contains(index));
+			!our_votes.is_empty()
+		};
+		let was_confirmed = recent_disputes
+			.get(&(session, candidate_hash))
+			.map_or(false, |s| s.is_confirmed_concluded());
+		let comparator = self
+			.ordering_provider
+			.candidate_comparator(ctx.sender(), &candidate_receipt)
+			.await?;
+		let is_included = comparator.is_some();
+		let is_local = statements
+			.iter()
+			.any(|(_, index)| controlled_indices.contains(index));
+
+		// Update candidate votes.
+		for (statement, val_index) in &statements {
+			if validators
+				.get(val_index.0 as usize)
+				.map_or(true, |v| v != statement.validator_public())
+			{
+				tracing::debug!(
+					target: LOG_TARGET,
+					?val_index,
+					session,
+					claimed_key = ?statement.validator_public(),
+					"Validator index doesn't match claimed key",
+				);
+
+				continue
+			}
+
+			match statement.statement().clone() {
+				DisputeStatement::Valid(valid_kind) => {
+					self.metrics.on_valid_vote();
+					insert_into_statement_vec(
+						&mut votes.valid,
+						valid_kind,
+						*val_index,
+						statement.validator_signature().clone(),
+					);
+				},
+				DisputeStatement::Invalid(invalid_kind) => {
+					self.metrics.on_invalid_vote();
+					insert_into_statement_vec(
+						&mut votes.invalid,
+						invalid_kind,
+						*val_index,
+						statement.validator_signature().clone(),
+					);
+				},
+			}
+		}
+
+		// Whether or not we already know that this is a good dispute:
+		//
+		// Note that we can only know for sure whether we reached the `byzantine_threshold`
+		// after updating the candidate votes above; therefore the spam check comes afterwards:
+		let is_confirmed = is_included ||
+			was_confirmed ||
+			is_local ||
+			votes.voted_indices().len() > byzantine_threshold(n_validators);
+
+		// Potential spam:
+		if !is_confirmed {
+			let mut free_spam_slots = false;
+			for (statement, index) in statements.iter() {
+				free_spam_slots |= statement.statement().is_backing() ||
+					self.spam_slots.add_unconfirmed(session, candidate_hash, *index);
+			}
+			// No reporting validator had a free spam slot:
+			if !free_spam_slots {
+				tracing::debug!(
+					target: LOG_TARGET,
+					?candidate_hash,
+					?session,
+					?statements,
+					"Rejecting import because of full spam slots."
+				);
+				return Ok(ImportStatementsResult::InvalidImport)
+			}
+		}
+
+		if is_confirmed && !was_confirmed {
+			// Former spammers have not been spammers after all:
+			self.spam_slots.clear(&(session, candidate_hash));
+		}
+
+		// Check if newly disputed.
+		let is_disputed = !votes.valid.is_empty() && !votes.invalid.is_empty();
+		let concluded_valid = votes.valid.len() >= supermajority_threshold;
+		let concluded_invalid = votes.invalid.len() >= supermajority_threshold;
+
+		// Participate in the dispute if the imported vote was not local, we did not vote
+		// before either, and we actually have keys to issue a local vote.
+		if !is_local && !voted_already && is_disputed && !controlled_indices.is_empty() {
+			tracing::trace!(
+				target: LOG_TARGET,
+				candidate_hash = ?candidate_receipt.hash(),
+				priority = ?comparator.is_some(),
+				"Queuing participation for candidate"
+			);
+			if comparator.is_some() {
+				self.metrics.on_queued_priority_participation();
+			} else {
+				self.metrics.on_queued_best_effort_participation();
+			}
+			// Participate whenever the imported vote was not local and we did not cast a
+			// vote previously:
+			let r = self
+				.participation
+				.queue_participation(
+					ctx,
+					comparator,
+					ParticipationRequest::new(candidate_receipt, session, n_validators),
+				)
+				.await;
+			log_error(r)?;
+		}
+
+		let prev_status = recent_disputes.get(&(session, candidate_hash)).cloned();
+
+		let status = if is_disputed {
+			let status = recent_disputes.entry((session, candidate_hash)).or_insert_with(|| {
+				tracing::info!(
+					target: LOG_TARGET,
+					?candidate_hash,
+					session,
+					"New dispute initiated for candidate.",
+				);
+				DisputeStatus::active()
+			});
+
+			if is_confirmed {
+				*status = status.confirm();
+			}
+
+			// Note: concluded-invalid overwrites concluded-valid,
+			// so we do this check first. Dispute state machine is
+			// non-commutative.
+			if concluded_valid {
+				*status = status.concluded_for(now);
+			}
+
+			if concluded_invalid {
+				*status = status.concluded_against(now);
+			}
+
+			Some(*status)
+		} else {
+			None
+		};
+
+		if status != prev_status {
+			if prev_status.is_none() {
+				self.metrics.on_open();
+			}
+
+			if concluded_valid {
+				self.metrics.on_concluded_valid();
+			}
+			if concluded_invalid {
+				self.metrics.on_concluded_invalid();
+			}
+
+			// Only write when updated:
+			overlay_db.write_recent_disputes(recent_disputes);
+		}
+
+		overlay_db.write_candidate_votes(session, candidate_hash, votes.into());
+
+		Ok(ImportStatementsResult::ValidImport)
+	}
+
+	async fn issue_local_statement(
+		&mut self,
+		ctx: &mut impl SubsystemContext,
+		overlay_db: &mut OverlayedBackend<'_, impl Backend>,
+		candidate_hash: CandidateHash,
+		candidate_receipt: CandidateReceipt,
+		session: SessionIndex,
+		valid: bool,
+		now: Timestamp,
+	) -> Result<()> {
+		// Load session info.
+		let info = match self.rolling_session_window.session_info(session) {
+			None => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					session,
+					"Missing info for session which has an active dispute",
+				);
+
+				return Ok(())
+			},
+			Some(info) => info,
+		};
+
+		let validators = info.validators.clone();
+
+		let votes = overlay_db
+			.load_candidate_votes(session, &candidate_hash)?
+			.map(CandidateVotes::from)
+			.unwrap_or_else(|| CandidateVotes {
+				candidate_receipt: candidate_receipt.clone(),
+				valid: Vec::new(),
+				invalid: Vec::new(),
+			});
+
+		// Sign a statement for each validator index we control which has
+		// not already voted. This should generally be maximum 1 statement.
+		let voted_indices = votes.voted_indices();
+		let mut statements = Vec::new();
+
+		let voted_indices: HashSet<_> = voted_indices.into_iter().collect();
+		let controlled_indices = find_controlled_validator_indices(&self.keystore, &validators[..]);
+		for index in controlled_indices {
+			if voted_indices.contains(&index) {
+				continue
+			}
+
+			let keystore = self.keystore.clone() as Arc<_>;
+			let res = SignedDisputeStatement::sign_explicit(
+				&keystore,
+				valid,
+				candidate_hash,
+				session,
+				validators[index.0 as usize].clone(),
+			)
+			.await;
+
+			match res {
+				Ok(Some(signed_dispute_statement)) => {
+					statements.push((signed_dispute_statement, index));
+				},
+				Ok(None) => {},
+				Err(e) => {
+					tracing::error!(
+						target: LOG_TARGET,
+						err = ?e,
+						"Encountered keystore error while signing dispute statement",
+					);
+				},
+			}
+		}
+
+		// Get our message out:
+		for (statement, index) in &statements {
+			let dispute_message =
+				match make_dispute_message(info, &votes, statement.clone(), *index) {
+					Err(err) => {
+						tracing::debug!(
+							target: LOG_TARGET,
+							?err,
+							"Creating dispute message failed."
+						);
+						continue
+					},
+					Ok(dispute_message) => dispute_message,
+				};
+
+			ctx.send_message(DisputeDistributionMessage::SendDispute(dispute_message)).await;
+		}
+
+		// Do import
+		if !statements.is_empty() {
+			match self
+				.handle_import_statements(
+					ctx,
+					overlay_db,
+					candidate_hash,
+					MaybeCandidateReceipt::Provides(candidate_receipt),
+					session,
+					statements,
+					now,
+				)
+				.await?
+			{
+				ImportStatementsResult::InvalidImport => {
+					tracing::error!(
+						target: LOG_TARGET,
+						?candidate_hash,
+						?session,
+						"`handle_import_statements` considers our own votes invalid!"
+					);
+				},
+				ImportStatementsResult::ValidImport => {
+					tracing::trace!(
+						target: LOG_TARGET,
+						?candidate_hash,
+						?session,
+						"`handle_import_statements` successfully imported our vote!"
+					);
+				},
+			}
+		}
+
+		Ok(())
+	}
+}
+
+/// Messages to be handled in this subsystem.
+enum MuxedMessage {
+	/// Messages from other subsystems.
+	Subsystem(FromOverseer<DisputeCoordinatorMessage>),
+	/// Messages from participation workers.
+	Participation(participation::WorkerMessage),
+}
+
+impl MuxedMessage {
+	async fn receive(
+		ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage>
+		          + overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
+		from_sender: &mut participation::WorkerMessageReceiver,
+	) -> FatalResult<Self> {
+		// We are only fusing here to make `select` happy; in reality we will quit if the
+		// stream ends.
+		let from_overseer = ctx.recv().fuse();
+		futures::pin_mut!(from_overseer, from_sender);
+		futures::select!(
+			msg = from_overseer => Ok(Self::Subsystem(msg.map_err(Fatal::SubsystemReceive)?)),
+			msg = from_sender.next() => Ok(Self::Participation(msg.ok_or(Fatal::ParticipationWorkerReceiverExhausted)?)),
+		)
+	}
+}
+
+fn insert_into_statement_vec<T>(
+	vec: &mut Vec<(T, ValidatorIndex, ValidatorSignature)>,
+	tag: T,
+	val_index: ValidatorIndex,
+	val_signature: ValidatorSignature,
+) {
+	let pos = match vec.binary_search_by_key(&val_index, |x| x.1) {
+		Ok(_) => return, // no duplicates needed.
+		Err(p) => p,
+	};
+
+	vec.insert(pos, (tag, val_index, val_signature));
+}
+
+#[derive(Debug, Clone)]
+enum MaybeCandidateReceipt {
+	/// Directly provides the candidate receipt.
+	Provides(CandidateReceipt),
+	/// Assumes it was seen before by means of a seconded message.
+	AssumeBackingVotePresent,
+}
+
+#[derive(Debug, thiserror::Error)]
+enum DisputeMessageCreationError {
+	#[error("There was no opposite vote available")]
+	NoOppositeVote,
+	#[error("Found vote had an invalid validator index that could not be found")]
+	InvalidValidatorIndex,
+	#[error("Statement found in votes had invalid signature.")]
+	InvalidStoredStatement,
+	#[error(transparent)]
+	InvalidStatementCombination(DisputeMessageCheckError),
+}
+
+fn make_dispute_message(
+	info: &SessionInfo,
+	votes: &CandidateVotes,
+	our_vote: SignedDisputeStatement,
+	our_index: ValidatorIndex,
+) -> std::result::Result<DisputeMessage, DisputeMessageCreationError> {
+	let validators = &info.validators;
+
+	let (valid_statement, valid_index, invalid_statement, invalid_index) =
+		if let DisputeStatement::Valid(_) = our_vote.statement() {
+			let (statement_kind, validator_index, validator_signature) =
+				votes.invalid.get(0).ok_or(DisputeMessageCreationError::NoOppositeVote)?.clone();
+			let other_vote = SignedDisputeStatement::new_checked(
+				DisputeStatement::Invalid(statement_kind),
+				our_vote.candidate_hash().clone(),
+				our_vote.session_index(),
+				validators
+					.get(validator_index.0 as usize)
+					.ok_or(DisputeMessageCreationError::InvalidValidatorIndex)?
+					.clone(),
+				validator_signature,
+			)
+			.map_err(|()| DisputeMessageCreationError::InvalidStoredStatement)?;
+			(our_vote, our_index, other_vote, validator_index)
+		} else {
+			let (statement_kind, validator_index, validator_signature) =
+				votes.valid.get(0).ok_or(DisputeMessageCreationError::NoOppositeVote)?.clone();
+			let other_vote = SignedDisputeStatement::new_checked(
+				DisputeStatement::Valid(statement_kind),
+				our_vote.candidate_hash().clone(),
+				our_vote.session_index(),
+				validators
+					.get(validator_index.0 as usize)
+					.ok_or(DisputeMessageCreationError::InvalidValidatorIndex)?
+					.clone(),
+				validator_signature,
+			)
+			.map_err(|()| DisputeMessageCreationError::InvalidStoredStatement)?;
+			(other_vote, validator_index, our_vote, our_index)
+		};
+
+	DisputeMessage::from_signed_statements(
+		valid_statement,
+		valid_index,
+		invalid_statement,
+		invalid_index,
+		votes.candidate_receipt.clone(),
+		info,
+	)
+	.map_err(DisputeMessageCreationError::InvalidStatementCombination)
+}
+
+/// Determine the best block and its block number.
+/// Assumes `block_descriptions` are sorted from the one
+/// with the lowest `BlockNumber` to the highest.
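+///
+/// A sketch of the expected behavior (hypothetical hashes, assuming blocks 11 and 12 follow the
+/// base block 10 and only block 12 contains a candidate with a possibly-invalid dispute):
+///
+/// ```ignore
+/// let (number, hash) = determine_undisputed_chain(&mut overlay_db, 10, base_hash, descriptions)?;
+/// assert_eq!(number, 11); // the last block before the first possibly disputed candidate
+/// ```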
+fn determine_undisputed_chain(
+	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
+	base_number: BlockNumber,
+	base_hash: Hash,
+	block_descriptions: Vec<BlockDescription>,
+) -> Result<(BlockNumber, Hash)> {
+	let last = block_descriptions
+		.last()
+		.map(|e| (base_number + block_descriptions.len() as BlockNumber, e.block_hash))
+		.unwrap_or((base_number, base_hash));
+
+	// Fast path for no disputes.
+	let recent_disputes = match overlay_db.load_recent_disputes()? {
+		None => return Ok(last),
+		Some(a) if a.is_empty() => return Ok(last),
+		Some(a) => a,
+	};
+
+	let is_possibly_invalid = |session, candidate_hash| {
+		recent_disputes
+			.get(&(session, candidate_hash))
+			.map_or(false, |status| status.is_possibly_invalid())
+	};
+
+	for (i, BlockDescription { session, candidates, .. }) in block_descriptions.iter().enumerate() {
+		if candidates.iter().any(|c| is_possibly_invalid(*session, *c)) {
+			if i == 0 {
+				return Ok((base_number, base_hash))
+			} else {
+				return Ok((base_number + i as BlockNumber, block_descriptions[i - 1].block_hash))
+			}
+		}
+	}
+
+	Ok(last)
+}
+
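+/// Find the indices of all validators in `validators` for which the local keystore holds a
+/// `ValidatorPair` key, i.e. the validators this node controls.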
+fn find_controlled_validator_indices(
+	keystore: &LocalKeystore,
+	validators: &[ValidatorId],
+) -> HashSet<ValidatorIndex> {
+	let mut controlled = HashSet::new();
+	for (index, validator) in validators.iter().enumerate() {
+		if keystore.key_pair::<ValidatorPair>(validator).ok().flatten().is_none() {
+			continue
+		}
+
+		controlled.insert(ValidatorIndex(index as _));
+	}
+
+	controlled
+}
diff --git a/polkadot/node/core/dispute-coordinator/src/real/mod.rs b/polkadot/node/core/dispute-coordinator/src/real/mod.rs
index 4c6165545b9..551d4cedaee 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/mod.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/mod.rs
@@ -22,81 +22,91 @@
 //!
 //! This subsystem will be the point which produce dispute votes, either positive or negative, based on locally-observed
 //! validation results as well as a sink for votes received by other subsystems. When importing a dispute vote from
-//! another node, this will trigger the dispute participation subsystem to recover and validate the block and call
-//! back to this subsystem.
+//! another node, this will trigger dispute participation to recover and validate the block.
 
-use std::{
-	collections::HashSet,
-	sync::Arc,
-	time::{SystemTime, UNIX_EPOCH},
-};
+use std::{collections::HashSet, sync::Arc};
 
-use futures::{channel::oneshot, prelude::*};
+use futures::FutureExt;
 use kvdb::KeyValueDB;
-use parity_scale_codec::{Decode, Encode, Error as CodecError};
-use polkadot_node_primitives::{
-	CandidateVotes, DisputeMessage, DisputeMessageCheckError, SignedDisputeStatement,
-	DISPUTE_WINDOW,
-};
+use parity_scale_codec::Error as CodecError;
+
+use sc_keystore::LocalKeystore;
+
+use polkadot_node_primitives::{CandidateVotes, DISPUTE_WINDOW};
 use polkadot_node_subsystem::{
-	errors::{ChainApiError, RuntimeApiError},
-	messages::{
-		BlockDescription, DisputeCoordinatorMessage, DisputeDistributionMessage,
-		DisputeParticipationMessage, ImportStatementsResult, RuntimeApiMessage, RuntimeApiRequest,
-	},
-	overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError,
-};
-use polkadot_node_subsystem_util::rolling_session_window::{
-	RollingSessionWindow, SessionWindowUpdate,
+	messages::DisputeCoordinatorMessage, overseer, ActivatedLeaf, FromOverseer, OverseerSignal,
+	SpawnedSubsystem, SubsystemContext, SubsystemError,
 };
-use polkadot_primitives::v1::{
-	BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement,
-	DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, SessionInfo,
-	ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature,
-};
-use sc_keystore::LocalKeystore;
+use polkadot_node_subsystem_util::rolling_session_window::RollingSessionWindow;
+use polkadot_primitives::v1::{ValidatorIndex, ValidatorPair};
 
 use crate::metrics::Metrics;
 use backend::{Backend, OverlayedBackend};
-use db::v1::{DbBackend, RecentDisputes};
+use db::v1::DbBackend;
+use error::{FatalResult, Result};
+
+use self::{
+	error::{Error, NonFatal},
+	ordering::CandidateComparator,
+	participation::ParticipationRequest,
+	spam_slots::{SpamSlots, UnconfirmedDisputes},
+	status::{get_active_with_status, SystemClock},
+};
 
 mod backend;
 mod db;
 
+/// Common error types for this subsystem.
+mod error;
+
+/// Subsystem after receiving the first active leaf.
+mod initialized;
+use initialized::Initialized;
+
+/// Provider of an ordering for candidates for dispute participation, see
+/// [`participation`] below.
+///
+/// If we have seen a candidate included somewhere, we should treat it as a priority and will be
+/// able to provide an ordering for participation. Thus a dispute for a candidate where we can get
+/// some ordering is high-priority (we know it is a valid dispute), and such disputes can be
+/// ordered by `participation` based on `relay_parent` block number and other metrics, so each
+/// validator will participate in disputes in a similar order. This ensures we will be resolving
+/// disputes, even under heavy load.
+mod ordering;
+use ordering::OrderingProvider;
+
+/// When importing votes, we will check via the `ordering` module whether or not we know of the
+/// candidate to be included somewhere. If not, the votes might be spam, and in this case we want
+/// to limit the number of locally imported votes to prevent DoS attacks/resource exhaustion. The
+/// `spam_slots` module helps keep track of unconfirmed disputes per validator. If a validator's
+/// spam slots get full, we will drop any further potential spam votes from that validator and
+/// report back that the import failed, which will lead any honest validator to retry. Thus the
+/// spam slots can be relatively small, as a drop is not fatal.
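+///
+/// A rough sketch of the intended use (`add_unconfirmed`/`clear` are illustrative names, see the
+/// `spam_slots` module for the actual API):
+///
+/// ```ignore
+/// let mut slots = SpamSlots::recover_from_state(UnconfirmedDisputes::new());
+/// if !slots.add_unconfirmed(session, candidate_hash, validator_index) {
+/// 	// Slots for this validator are full: drop the vote and report the import as failed;
+/// 	// an honest validator will simply retry.
+/// }
+/// // Once the dispute got confirmed, the accounted slots are freed again:
+/// slots.clear(&(session, candidate_hash));
+/// ```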
+mod spam_slots;
+
+/// Handling of participation requests via `Participation`.
+///
+/// `Participation` provides an API (`Participation::queue_participation`) for queuing dispute
+/// participations and will process those participation requests, such that the most
+/// important/urgent disputes will be resolved and processed first. More importantly, it will
+/// order requests in a way that disputes will get resolved, even if there are lots of them.
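+///
+/// A hedged sketch of queuing a request (argument order simplified, assuming a `Participation`
+/// instance and an optional `CandidateComparator` as produced during startup):
+///
+/// ```ignore
+/// let request = ParticipationRequest::new(candidate_receipt, session, n_validators);
+/// participation.queue_participation(ctx, candidate_comparator, request).await?;
+/// ```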
+mod participation;
+
+/// Status tracking of disputes (`DisputeStatus`).
+mod status;
+use status::Clock;
+
 #[cfg(test)]
 mod tests;
 
 const LOG_TARGET: &str = "parachain::dispute-coordinator";
 
-// The choice here is fairly arbitrary. But any dispute that concluded more than a few minutes ago
-// is not worth considering anymore. Changing this value has little to no bearing on consensus,
-// and really only affects the work that the node might do on startup during periods of many disputes.
-const ACTIVE_DURATION_SECS: Timestamp = 180;
-
-/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots.
-type Timestamp = u64;
-
-#[derive(Eq, PartialEq)]
-enum Participation {
-	Pending,
-	Complete,
-}
-
-impl Participation {
-	fn complete(&mut self) -> bool {
-		let complete = *self == Participation::Complete;
-		if !complete {
-			*self = Participation::Complete
-		}
-		complete
-	}
-}
-
-struct State {
+/// An implementation of the dispute coordinator subsystem.
+pub struct DisputeCoordinatorSubsystem {
+	config: Config,
+	store: Arc<dyn KeyValueDB>,
 	keystore: Arc<LocalKeystore>,
-	highest_session: Option<SessionIndex>,
-	rolling_session_window: RollingSessionWindow,
-	recovery_state: Participation,
+	metrics: Metrics,
 }
 
 /// Configuration for the dispute coordinator subsystem.
@@ -112,1175 +122,288 @@ impl Config {
 	}
 }
 
-/// An implementation of the dispute coordinator subsystem.
-pub struct DisputeCoordinatorSubsystem {
-	config: Config,
-	store: Arc<dyn KeyValueDB>,
-	keystore: Arc<LocalKeystore>,
-	metrics: Metrics,
-}
-
-impl DisputeCoordinatorSubsystem {
-	/// Create a new instance of the subsystem.
-	pub fn new(
-		store: Arc<dyn KeyValueDB>,
-		config: Config,
-		keystore: Arc<LocalKeystore>,
-		metrics: Metrics,
-	) -> Self {
-		DisputeCoordinatorSubsystem { store, config, keystore, metrics }
-	}
-}
-
 impl<Context> overseer::Subsystem<Context, SubsystemError> for DisputeCoordinatorSubsystem
 where
 	Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
 	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
 {
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
-		let backend = DbBackend::new(self.store.clone(), self.config.column_config());
-		let future = run(self, ctx, backend, Box::new(SystemClock)).map(|_| Ok(())).boxed();
-
-		SpawnedSubsystem { name: "dispute-coordinator-subsystem", future }
-	}
-}
-
-trait Clock: Send + Sync {
-	fn now(&self) -> Timestamp;
-}
-
-struct SystemClock;
-
-impl Clock for SystemClock {
-	fn now(&self) -> Timestamp {
-		// `SystemTime` is notoriously non-monotonic, so our timers might not work
-		// exactly as expected.
-		//
-		// Regardless, disputes are considered active based on an order of minutes,
-		// so a few seconds of slippage in either direction shouldn't affect the
-		// amount of work the node is doing significantly.
-		match SystemTime::now().duration_since(UNIX_EPOCH) {
-			Ok(d) => d.as_secs(),
-			Err(e) => {
-				tracing::warn!(
-					target: LOG_TARGET,
-					err = ?e,
-					"Current time is before unix epoch. Validation will not work correctly."
-				);
-
-				0
-			},
+		let future = async {
+			let backend = DbBackend::new(self.store.clone(), self.config.column_config());
+			self.run(ctx, backend, Box::new(SystemClock))
+				.await
+				.map_err(|e| SubsystemError::with_origin("dispute-coordinator", e))
 		}
-	}
-}
-
-#[derive(Debug, thiserror::Error)]
-#[allow(missing_docs)]
-pub enum Error {
-	#[error(transparent)]
-	RuntimeApi(#[from] RuntimeApiError),
-
-	#[error(transparent)]
-	ChainApi(#[from] ChainApiError),
-
-	#[error(transparent)]
-	Io(#[from] std::io::Error),
-
-	#[error(transparent)]
-	Oneshot(#[from] oneshot::Canceled),
+		.boxed();
 
-	#[error("Oneshot send failed")]
-	OneshotSend,
-
-	#[error(transparent)]
-	Subsystem(#[from] SubsystemError),
-
-	#[error(transparent)]
-	Codec(#[from] CodecError),
-}
-
-impl From<db::v1::Error> for Error {
-	fn from(err: db::v1::Error) -> Self {
-		match err {
-			db::v1::Error::Io(io) => Self::Io(io),
-			db::v1::Error::Codec(e) => Self::Codec(e),
-		}
-	}
-}
-
-impl Error {
-	fn trace(&self) {
-		match self {
-			// don't spam the log with spurious errors
-			Self::RuntimeApi(_) | Self::Oneshot(_) =>
-				tracing::debug!(target: LOG_TARGET, err = ?self),
-			// it's worth reporting otherwise
-			_ => tracing::warn!(target: LOG_TARGET, err = ?self),
-		}
+		SpawnedSubsystem { name: "dispute-coordinator-subsystem", future }
 	}
 }
 
-/// The status of dispute. This is a state machine which can be altered by the
-/// helper methods.
-#[derive(Debug, Clone, Copy, Encode, Decode, PartialEq)]
-pub enum DisputeStatus {
-	/// The dispute is active and unconcluded.
-	#[codec(index = 0)]
-	Active,
-	/// The dispute has been concluded in favor of the candidate
-	/// since the given timestamp.
-	#[codec(index = 1)]
-	ConcludedFor(Timestamp),
-	/// The dispute has been concluded against the candidate
-	/// since the given timestamp.
-	///
-	/// This takes precedence over `ConcludedFor` in the case that
-	/// both are true, which is impossible unless a large amount of
-	/// validators are participating on both sides.
-	#[codec(index = 2)]
-	ConcludedAgainst(Timestamp),
-}
-
-impl DisputeStatus {
-	/// Initialize the status to the active state.
-	pub fn active() -> DisputeStatus {
-		DisputeStatus::Active
-	}
-
-	/// Transition the status to a new status after observing the dispute has concluded for the candidate.
-	/// This may be a no-op if the status was already concluded.
-	pub fn concluded_for(self, now: Timestamp) -> DisputeStatus {
-		match self {
-			DisputeStatus::Active => DisputeStatus::ConcludedFor(now),
-			DisputeStatus::ConcludedFor(at) => DisputeStatus::ConcludedFor(std::cmp::min(at, now)),
-			against => against,
-		}
-	}
-
-	/// Transition the status to a new status after observing the dispute has concluded against the candidate.
-	/// This may be a no-op if the status was already concluded.
-	pub fn concluded_against(self, now: Timestamp) -> DisputeStatus {
-		match self {
-			DisputeStatus::Active => DisputeStatus::ConcludedAgainst(now),
-			DisputeStatus::ConcludedFor(at) =>
-				DisputeStatus::ConcludedAgainst(std::cmp::min(at, now)),
-			DisputeStatus::ConcludedAgainst(at) =>
-				DisputeStatus::ConcludedAgainst(std::cmp::min(at, now)),
-		}
-	}
-
-	/// Whether the disputed candidate is possibly invalid.
-	pub fn is_possibly_invalid(&self) -> bool {
-		match self {
-			DisputeStatus::Active | DisputeStatus::ConcludedAgainst(_) => true,
-			DisputeStatus::ConcludedFor(_) => false,
-		}
+impl DisputeCoordinatorSubsystem {
+	/// Create a new instance of the subsystem.
+	pub fn new(
+		store: Arc<dyn KeyValueDB>,
+		config: Config,
+		keystore: Arc<LocalKeystore>,
+		metrics: Metrics,
+	) -> Self {
+		Self { store, config, keystore, metrics }
 	}
 
-	/// Yields the timestamp this dispute concluded at, if any.
-	pub fn concluded_at(&self) -> Option<Timestamp> {
-		match self {
-			DisputeStatus::Active => None,
-			DisputeStatus::ConcludedFor(at) | DisputeStatus::ConcludedAgainst(at) => Some(*at),
-		}
-	}
-}
+	/// Initialize and afterwards run `Initialized::run`.
+	async fn run<B, Context>(
+		self,
+		mut ctx: Context,
+		backend: B,
+		clock: Box<dyn Clock>,
+	) -> FatalResult<()>
+	where
+		Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+		Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
+		B: Backend + 'static,
+	{
+		let res = self.initialize(&mut ctx, backend, &*clock).await?;
 
-async fn run<B, Context>(
-	subsystem: DisputeCoordinatorSubsystem,
-	mut ctx: Context,
-	mut backend: B,
-	clock: Box<dyn Clock>,
-) where
-	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
-	Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
-	B: Backend,
-{
-	loop {
-		let res = run_until_error(&mut ctx, &subsystem, &mut backend, &*clock).await;
-		match res {
-			Err(e) => {
-				e.trace();
+		let (participations, first_leaf, initialized, backend) = match res {
+			// Concluded:
+			None => return Ok(()),
+			Some(r) => r,
+		};
 
-				if let Error::Subsystem(SubsystemError::Context(_)) = e {
-					break
-				}
-			},
-			Ok(()) => {
-				tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
-				break
-			},
-		}
+		initialized.run(ctx, backend, participations, Some(first_leaf), clock).await
 	}
-}
 
-// Run the subsystem until an error is encountered or a `conclude` signal is received.
-// Most errors are non-fatal and should lead to another call to this function.
-//
-// A return value of `Ok` indicates that an exit should be made, while non-fatal errors
-// lead to another call to this function.
-async fn run_until_error<B, Context>(
-	ctx: &mut Context,
-	subsystem: &DisputeCoordinatorSubsystem,
-	backend: &mut B,
-	clock: &dyn Clock,
-) -> Result<(), Error>
-where
-	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
-	Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
-	B: Backend,
-{
-	let mut state = State {
-		keystore: subsystem.keystore.clone(),
-		highest_session: None,
-		rolling_session_window: RollingSessionWindow::new(DISPUTE_WINDOW),
-		recovery_state: Participation::Pending,
-	};
-	let metrics = &subsystem.metrics;
+	/// Initialize the subsystem and make sure participations are recovered properly on startup.
+	async fn initialize<B, Context>(
+		self,
+		ctx: &mut Context,
+		mut backend: B,
+		clock: &(dyn Clock),
+	) -> FatalResult<
+		Option<(
+			Vec<(Option<CandidateComparator>, ParticipationRequest)>,
+			ActivatedLeaf,
+			Initialized,
+			B,
+		)>,
+	>
+	where
+		Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+		Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
+		B: Backend + 'static,
+	{
+		loop {
+			let (first_leaf, rolling_session_window) = match get_rolling_session_window(ctx).await {
+				Ok(Some(update)) => update,
+				Ok(None) => {
+					tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
+					return Ok(None)
+				},
+				Err(Error::Fatal(f)) => return Err(f),
+				Err(Error::NonFatal(e)) => {
+					e.log();
+					continue
+				},
+			};
 
-	loop {
-		let mut overlay_db = OverlayedBackend::new(backend);
-		match ctx.recv().await? {
-			FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
-			FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
-				handle_new_activations(
+			let mut overlay_db = OverlayedBackend::new(&mut backend);
+			let (participations, spam_slots, ordering_provider) = match self
+				.handle_startup(
 					ctx,
+					first_leaf.clone(),
+					&rolling_session_window,
 					&mut overlay_db,
-					&mut state,
-					update.activated.into_iter().map(|a| a.hash),
-					clock.now(),
-					&metrics,
+					clock,
 				)
-				.await?;
-				if !state.recovery_state.complete() {
-					handle_startup(ctx, &mut overlay_db, &mut state).await?;
-				}
-			},
-			FromOverseer::Signal(OverseerSignal::BlockFinalized(_, _)) => {},
-			FromOverseer::Communication { msg } =>
-				handle_incoming(ctx, &mut overlay_db, &mut state, msg, clock.now(), &metrics)
-					.await?,
-		}
+				.await
+			{
+				Ok(v) => v,
+				Err(Error::Fatal(f)) => return Err(f),
+				Err(Error::NonFatal(e)) => {
+					e.log();
+					continue
+				},
+			};
+			if !overlay_db.is_empty() {
+				let ops = overlay_db.into_write_ops();
+				backend.write(ops)?;
+			}
 
-		if !overlay_db.is_empty() {
-			let ops = overlay_db.into_write_ops();
-			backend.write(ops)?;
+			return Ok(Some((
+				participations,
+				first_leaf,
+				Initialized::new(self, rolling_session_window, spam_slots, ordering_provider),
+				backend,
+			)))
 		}
 	}
-}
-
-// Restores the subsystem's state before proceeding with the main event loop. Primarily, this
-// repopulates the rolling session window the relevant session information to handle incoming
-// import statement requests.
-//
-// This method also retransmits a `DisputeParticiationMessage::Participate` for any non-concluded
-// disputes for which the subsystem doesn't have a local statement, ensuring it eventually makes an
-// arbitration on the dispute.
-async fn handle_startup<Context>(
-	ctx: &mut Context,
-	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
-	state: &mut State,
-) -> Result<(), Error>
-where
-	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
-	Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
-{
-	let recent_disputes = match overlay_db.load_recent_disputes() {
-		Ok(Some(disputes)) => disputes,
-		Ok(None) => return Ok(()),
-		Err(e) => {
-			tracing::error!(target: LOG_TARGET, "Failed initial load of recent disputes: {:?}", e);
-			return Err(e.into())
-		},
-	};
 
-	// Filter out disputes that have already concluded.
-	let active_disputes = recent_disputes
-		.into_iter()
-		.filter(|(_, status)| *status == DisputeStatus::Active)
-		.collect::<RecentDisputes>();
+	// Restores the subsystem's state before proceeding with the main event loop.
+	//
+	// - Prune any old disputes.
+	// - Find disputes we need to participate in.
+	// - Initialize spam slots & OrderingProvider.
+	async fn handle_startup<Context>(
+		&self,
+		ctx: &mut Context,
+		initial_head: ActivatedLeaf,
+		rolling_session_window: &RollingSessionWindow,
+		overlay_db: &mut OverlayedBackend<'_, impl Backend>,
+		clock: &dyn Clock,
+	) -> Result<(
+		Vec<(Option<CandidateComparator>, ParticipationRequest)>,
+		SpamSlots,
+		OrderingProvider,
+	)>
+	where
+		Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+		Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
+	{
+		// Prune obsolete disputes:
+		db::v1::note_current_session(overlay_db, rolling_session_window.latest_session())?;
 
-	for ((session, ref candidate_hash), _) in active_disputes.into_iter() {
-		let votes: CandidateVotes = match overlay_db.load_candidate_votes(session, candidate_hash) {
-			Ok(Some(votes)) => votes.into(),
-			Ok(None) => continue,
+		let active_disputes = match overlay_db.load_recent_disputes() {
+			Ok(Some(disputes)) =>
+				get_active_with_status(disputes.into_iter(), clock.now()).collect(),
+			Ok(None) => Vec::new(),
 			Err(e) => {
 				tracing::error!(
 					target: LOG_TARGET,
-					"Failed initial load of candidate votes: {:?}",
+					"Failed initial load of recent disputes: {:?}",
 					e
 				);
-				continue
-			},
-		};
-
-		let validators = match state.rolling_session_window.session_info(session) {
-			None => {
-				tracing::warn!(
-					target: LOG_TARGET,
-					session,
-					"Recovering lacks info for session which has an active dispute",
-				);
-				continue
-			},
-			Some(info) => info.validators.clone(),
-		};
-
-		let n_validators = validators.len();
-		let voted_indices: HashSet<_> = votes.voted_indices().into_iter().collect();
-
-		// Determine if there are any missing local statements for this dispute. Validators are
-		// filtered if:
-		//  1) their statement already exists, or
-		//  2) the validator key is not in the local keystore (i.e. the validator is remote).
-		// The remaining set only contains local validators that are also missing statements.
-		let missing_local_statement = validators
-			.iter()
-			.enumerate()
-			.map(|(index, validator)| (ValidatorIndex(index as _), validator))
-			.any(|(index, validator)| {
-				!voted_indices.contains(&index) &&
-					state
-						.keystore
-						.key_pair::<ValidatorPair>(validator)
-						.ok()
-						.map_or(false, |v| v.is_some())
-			});
-
-		// Send a `DisputeParticipationMessage` for all non-concluded disputes which do not have a
-		// recorded local statement.
-		if missing_local_statement {
-			let (report_availability, receive_availability) = oneshot::channel();
-			ctx.send_message(DisputeParticipationMessage::Participate {
-				candidate_hash: *candidate_hash,
-				candidate_receipt: votes.candidate_receipt.clone(),
-				session,
-				n_validators: n_validators as u32,
-				report_availability,
-			})
-			.await;
-
-			if !receive_availability.await? {
-				tracing::debug!(
-					target: LOG_TARGET,
-					"Participation failed. Candidate not available"
-				);
-			}
-		}
-	}
-
-	Ok(())
-}
-
-async fn handle_new_activations(
-	ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage>
-	          + overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
-	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
-	state: &mut State,
-	new_activations: impl IntoIterator<Item = Hash>,
-	now: u64,
-	metrics: &Metrics,
-) -> Result<(), Error> {
-	for new_leaf in new_activations {
-		match state.rolling_session_window.cache_session_info_for_head(ctx, new_leaf).await {
-			Err(e) => {
-				tracing::warn!(
-					target: LOG_TARGET,
-					err = ?e,
-					"Failed to update session cache for disputes",
-				);
-				continue
-			},
-			Ok(SessionWindowUpdate::Initialized { window_end, .. }) |
-			Ok(SessionWindowUpdate::Advanced { new_window_end: window_end, .. }) => {
-				let session = window_end;
-				if state.highest_session.map_or(true, |s| s < session) {
-					tracing::trace!(target: LOG_TARGET, session, "Observed new session. Pruning");
-
-					state.highest_session = Some(session);
-
-					db::v1::note_current_session(overlay_db, session)?;
-				}
-			},
-			Ok(SessionWindowUpdate::Unchanged) => {},
-		};
-		scrape_on_chain_votes(ctx, overlay_db, state, new_leaf, now, metrics).await?;
-	}
-
-	Ok(())
-}
-
-/// Scrapes on-chain votes (backing votes and concluded disputes) for a active leaf of the relay chain.
-async fn scrape_on_chain_votes(
-	ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage>
-	          + overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
-	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
-	state: &mut State,
-	new_leaf: Hash,
-	now: u64,
-	metrics: &Metrics,
-) -> Result<(), Error> {
-	// obtain the concluded disputes as well as the candidate backing votes
-	// from the new leaf
-	let ScrapedOnChainVotes { session, backing_validators_per_candidate, disputes } = {
-		let (tx, rx) = oneshot::channel();
-		ctx.send_message(RuntimeApiMessage::Request(
-			new_leaf,
-			RuntimeApiRequest::FetchOnChainVotes(tx),
-		))
-		.await;
-		match rx.await {
-			Ok(Ok(Some(val))) => val,
-			Ok(Ok(None)) => {
-				tracing::trace!(
-					target: LOG_TARGET,
-					relay_parent = ?new_leaf,
-					"No on chain votes stored for relay chain leaf");
-				return Ok(())
-			},
-			Ok(Err(e)) => {
-				tracing::debug!(
-					target: LOG_TARGET,
-					relay_parent = ?new_leaf,
-					error = ?e,
-					"Could not retrieve on chain votes due to an API error");
-				return Ok(())
-			},
-			Err(e) => {
-				tracing::debug!(
-					target: LOG_TARGET,
-					relay_parent = ?new_leaf,
-					error = ?e,
-					"Could not retrieve onchain votes due to oneshot cancellation");
-				return Ok(())
+				return Err(e.into())
 			},
-		}
-	};
-
-	if backing_validators_per_candidate.is_empty() && disputes.is_empty() {
-		return Ok(())
-	}
-
-	// Obtain the session info, for sake of `ValidatorId`s
-	// either from the rolling session window.
-	// Must be called _after_ `fn cache_session_info_for_head`
-	// which guarantees that the session info is available
-	// for the current session.
-	let session_info: SessionInfo =
-		if let Some(session_info) = state.rolling_session_window.session_info(session) {
-			session_info.clone()
-		} else {
-			tracing::warn!(
-				target: LOG_TARGET,
-				relay_parent = ?new_leaf,
-				"Could not retrieve session info from rolling session window");
-			return Ok(())
 		};
 
-	// Scraped on-chain backing votes for the candidates with
-	// the new active leaf as if we received them via gossip.
-	for (candidate_receipt, backers) in backing_validators_per_candidate {
-		let candidate_hash = candidate_receipt.hash();
-		let statements = backers.into_iter().filter_map(|(validator_index, attestation)| {
-			let validator_public: ValidatorId = session_info
-				.validators
-				.get(validator_index.0 as usize)
-				.or_else(|| {
-					tracing::error!(
-						target: LOG_TARGET,
-						relay_parent = ?new_leaf,
-						"Missing public key for validator {:?}",
-						&validator_index);
-					None
-				})
-				.cloned()?;
-			let validator_signature = attestation.signature().clone();
-			let valid_statement_kind = match attestation.to_compact_statement(candidate_hash) {
-				CompactStatement::Seconded(_) =>
-					ValidDisputeStatementKind::BackingSeconded(new_leaf),
-				CompactStatement::Valid(_) => ValidDisputeStatementKind::BackingValid(new_leaf),
-			};
-			let signed_dispute_statement =
-				SignedDisputeStatement::new_unchecked_from_trusted_source(
-					DisputeStatement::Valid(valid_statement_kind),
-					candidate_hash,
-					session,
-					validator_public,
-					validator_signature,
-				);
-			Some((signed_dispute_statement, validator_index))
-		});
-		let import_result = handle_import_statements(
-			ctx,
-			overlay_db,
-			state,
-			candidate_hash,
-			MaybeCandidateReceipt::Provides(candidate_receipt),
-			session,
-			statements,
-			now,
-			metrics,
-		)
-		.await?;
-		match import_result {
-			ImportStatementsResult::ValidImport => tracing::trace!(target: LOG_TARGET,
-				relay_parent = ?new_leaf,
-				?session,
-				"Imported backing vote from on-chain"),
-			ImportStatementsResult::InvalidImport => tracing::warn!(target: LOG_TARGET,
-				relay_parent = ?new_leaf,
-				?session,
-				"Attempted import of on-chain backing votes failed"),
-		}
-	}
-
-	if disputes.is_empty() {
-		return Ok(())
-	}
-
-	// Import concluded disputes from on-chain, this already went through a vote so it's assumed
-	// as verified. This will only be stored, gossiping it is not necessary.
-
-	// First try to obtain all the backings which ultimately contain the candidate
-	// receipt which we need.
-
-	for DisputeStatementSet { candidate_hash, session, statements } in disputes {
-		let statements = statements
-			.into_iter()
-			.filter_map(|(dispute_statement, validator_index, validator_signature)| {
-				let session_info: SessionInfo = if let Some(session_info) =
-					state.rolling_session_window.session_info(session)
-				{
-					session_info.clone()
-				} else {
-					tracing::warn!(
-					target: LOG_TARGET,
-					relay_parent = ?new_leaf,
-					?session,
-					"Could not retrieve session info from rolling session window for recently concluded dispute");
-					return None
+		let mut participation_requests = Vec::new();
+		let mut unconfirmed_disputes: UnconfirmedDisputes = UnconfirmedDisputes::new();
+		let mut ordering_provider = OrderingProvider::new(ctx.sender(), initial_head).await?;
+		for ((session, ref candidate_hash), status) in active_disputes {
+			let votes: CandidateVotes =
+				match overlay_db.load_candidate_votes(session, candidate_hash) {
+					Ok(Some(votes)) => votes.into(),
+					Ok(None) => continue,
+					Err(e) => {
+						tracing::error!(
+							target: LOG_TARGET,
+							"Failed initial load of candidate votes: {:?}",
+							e
+						);
+						continue
+					},
 				};
 
-				let validator_public: ValidatorId = session_info
-					.validators
-					.get(validator_index.0 as usize)
-					.or_else(|| {
-						tracing::error!(
+			let validators = match rolling_session_window.session_info(session) {
+				None => {
+					tracing::warn!(
 						target: LOG_TARGET,
-						relay_parent = ?new_leaf,
-						?session,
-						"Missing public key for validator {:?} that participated in concluded dispute",
-						&validator_index);
-						None
-					})
-					.cloned()?;
-
-				Some((
-					SignedDisputeStatement::new_unchecked_from_trusted_source(
-						dispute_statement,
-						candidate_hash,
 						session,
-						validator_public,
-						validator_signature,
-					),
-					validator_index,
-				))
-			})
-			.collect::<Vec<_>>();
-		let import_result = handle_import_statements(
-			ctx,
-			overlay_db,
-			state,
-			candidate_hash,
-			// TODO <https://github.com/paritytech/polkadot/issues/4011>
-			MaybeCandidateReceipt::AssumeBackingVotePresent,
-			session,
-			statements,
-			now,
-			metrics,
-		)
-		.await?;
-		match import_result {
-			ImportStatementsResult::ValidImport => tracing::trace!(target: LOG_TARGET,
-				relay_parent = ?new_leaf,
-				?candidate_hash,
-				?session,
-				"Imported statement of conlcuded dispute from on-chain"),
-			ImportStatementsResult::InvalidImport => tracing::warn!(target: LOG_TARGET,
-				relay_parent = ?new_leaf,
-				?candidate_hash,
-				?session,
-				"Attempted import of on-chain statement of concluded dispute failed"),
-		}
-	}
-	Ok(())
-}
-
-async fn handle_incoming(
-	ctx: &mut impl SubsystemContext,
-	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
-	state: &mut State,
-	message: DisputeCoordinatorMessage,
-	now: Timestamp,
-	metrics: &Metrics,
-) -> Result<(), Error> {
-	match message {
-		DisputeCoordinatorMessage::ImportStatements {
-			candidate_hash,
-			candidate_receipt,
-			session,
-			statements,
-			pending_confirmation,
-		} => {
-			let outcome = handle_import_statements(
-				ctx,
-				overlay_db,
-				state,
-				candidate_hash,
-				MaybeCandidateReceipt::Provides(candidate_receipt),
-				session,
-				statements,
-				now,
-				metrics,
-			)
-			.await?;
-			pending_confirmation.send(outcome).map_err(|_| Error::OneshotSend)?;
-		},
-		DisputeCoordinatorMessage::RecentDisputes(rx) => {
-			let recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default();
-			let _ = rx.send(recent_disputes.keys().cloned().collect());
-		},
-		DisputeCoordinatorMessage::ActiveDisputes(rx) => {
-			let recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default();
-			let _ = rx.send(collect_active(recent_disputes, now));
-		},
-		DisputeCoordinatorMessage::QueryCandidateVotes(query, rx) => {
-			let mut query_output = Vec::new();
-			for (session_index, candidate_hash) in query.into_iter() {
-				if let Some(v) = overlay_db.load_candidate_votes(session_index, &candidate_hash)? {
-					query_output.push((session_index, candidate_hash, v.into()));
-				} else {
-					tracing::debug!(
-						target: LOG_TARGET,
-						session_index,
-						"No votes found for candidate",
+						"Missing info for session which has an active dispute",
 					);
-				}
-			}
-			let _ = rx.send(query_output);
-		},
-		DisputeCoordinatorMessage::IssueLocalStatement(
-			session,
-			candidate_hash,
-			candidate_receipt,
-			valid,
-		) => {
-			issue_local_statement(
-				ctx,
-				overlay_db,
-				state,
-				candidate_hash,
-				candidate_receipt,
-				session,
-				valid,
-				now,
-				metrics,
-			)
-			.await?;
-		},
-		DisputeCoordinatorMessage::DetermineUndisputedChain {
-			base: (base_number, base_hash),
-			block_descriptions,
-			tx,
-		} => {
-			let undisputed_chain =
-				determine_undisputed_chain(overlay_db, base_number, base_hash, block_descriptions)?;
-
-			let _ = tx.send(undisputed_chain);
-		},
-	}
-
-	Ok(())
-}
-
-fn collect_active(
-	recent_disputes: RecentDisputes,
-	now: Timestamp,
-) -> Vec<(SessionIndex, CandidateHash)> {
-	recent_disputes
-		.iter()
-		.filter_map(|(disputed, status)| {
-			status
-				.concluded_at()
-				.filter(|at| at + ACTIVE_DURATION_SECS < now)
-				.map_or(Some(*disputed), |_| None)
-		})
-		.collect()
-}
-
-fn insert_into_statement_vec<T>(
-	vec: &mut Vec<(T, ValidatorIndex, ValidatorSignature)>,
-	tag: T,
-	val_index: ValidatorIndex,
-	val_signature: ValidatorSignature,
-) {
-	let pos = match vec.binary_search_by_key(&val_index, |x| x.1) {
-		Ok(_) => return, // no duplicates needed.
-		Err(p) => p,
-	};
-
-	vec.insert(pos, (tag, val_index, val_signature));
-}
-
-#[derive(Debug, Clone)]
-enum MaybeCandidateReceipt {
-	/// Directly provides the candiate receipt.
-	Provides(CandidateReceipt),
-	/// Assumes it was seen before by means of seconded message.
-	AssumeBackingVotePresent,
-}
-
-async fn handle_import_statements(
-	ctx: &mut impl SubsystemContext,
-	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
-	state: &mut State,
-	candidate_hash: CandidateHash,
-	candidate_receipt: MaybeCandidateReceipt,
-	session: SessionIndex,
-	statements: impl IntoIterator<Item = (SignedDisputeStatement, ValidatorIndex)>,
-	now: Timestamp,
-	metrics: &Metrics,
-) -> Result<ImportStatementsResult, Error> {
-	if state.highest_session.map_or(true, |h| session + DISPUTE_WINDOW < h) {
-		// It is not valid to participate in an ancient dispute (spam?).
-		return Ok(ImportStatementsResult::InvalidImport)
-	}
-
-	let session_info = match state.rolling_session_window.session_info(session) {
-		None => {
-			tracing::warn!(
-				target: LOG_TARGET,
-				session,
-				"Importing statement lacks info for session which has an active dispute",
-			);
-
-			return Ok(ImportStatementsResult::InvalidImport)
-		},
-		Some(info) => info,
-	};
-	let validators = session_info.validators.clone();
-
-	let n_validators = validators.len();
-
-	let supermajority_threshold = polkadot_primitives::v1::supermajority_threshold(n_validators);
-
-	// In case we are not provided with a candidate receipt
-	// we operate under the assumption, that a previous vote
-	// which included a `CandidateReceipt` was seen.
-	// This holds since every block is preceeded by the `Backing`-phase.
-	//
-	// There is one exception: A sufficiently sophisticated attacker could prevent
-	// us from seeing the backing votes by witholding arbitrary blocks, and hence we do
-	// not have a `CandidateReceipt` available.
-	let mut votes = match overlay_db
-		.load_candidate_votes(session, &candidate_hash)?
-		.map(CandidateVotes::from)
-	{
-		Some(votes) => votes,
-		None =>
-			if let MaybeCandidateReceipt::Provides(candidate_receipt) = candidate_receipt {
-				CandidateVotes { candidate_receipt, valid: Vec::new(), invalid: Vec::new() }
-			} else {
-				tracing::warn!(
-					target: LOG_TARGET,
-					session,
-					"Not seen backing vote for candidate which has an active dispute",
-				);
-				return Ok(ImportStatementsResult::InvalidImport)
-			},
-	};
-	let candidate_receipt = votes.candidate_receipt.clone();
-
-	// Update candidate votes.
-	for (statement, val_index) in statements {
-		if validators
-			.get(val_index.0 as usize)
-			.map_or(true, |v| v != statement.validator_public())
-		{
-			tracing::debug!(
-				target: LOG_TARGET,
-				?val_index,
-				session,
-				claimed_key = ?statement.validator_public(),
-				"Validator index doesn't match claimed key",
-			);
-
-			continue
-		}
-
-		match statement.statement().clone() {
-			DisputeStatement::Valid(valid_kind) => {
-				metrics.on_valid_vote();
-				insert_into_statement_vec(
-					&mut votes.valid,
-					valid_kind,
-					val_index,
-					statement.validator_signature().clone(),
-				);
-			},
-			DisputeStatement::Invalid(invalid_kind) => {
-				metrics.on_invalid_vote();
-				insert_into_statement_vec(
-					&mut votes.invalid,
-					invalid_kind,
-					val_index,
-					statement.validator_signature().clone(),
-				);
-			},
-		}
-	}
-
-	// Check if newly disputed.
-	let is_disputed = !votes.valid.is_empty() && !votes.invalid.is_empty();
-	let concluded_valid = votes.valid.len() >= supermajority_threshold;
-	let concluded_invalid = votes.invalid.len() >= supermajority_threshold;
-
-	let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default();
-
-	let prev_status = recent_disputes.get(&(session, candidate_hash)).map(|x| x.clone());
-
-	let status = if is_disputed {
-		let status = recent_disputes.entry((session, candidate_hash)).or_insert_with(|| {
-			tracing::info!(
-				target: LOG_TARGET,
-				?candidate_hash,
-				session,
-				"New dispute initiated for candidate.",
-			);
-			DisputeStatus::active()
-		});
-
-		// Note: concluded-invalid overwrites concluded-valid,
-		// so we do this check first. Dispute state machine is
-		// non-commutative.
-		if concluded_valid {
-			*status = status.concluded_for(now);
-		}
-
-		if concluded_invalid {
-			*status = status.concluded_against(now);
-		}
-
-		Some(*status)
-	} else {
-		None
-	};
-
-	if status != prev_status {
-		// This branch is only hit when the candidate is freshly disputed -
-		// status was previously `None`, and now is not.
-		if prev_status.is_none() && {
-			let controlled_indices =
-				find_controlled_validator_indices(&state.keystore, &validators);
-			let voted_indices = votes.voted_indices();
-
-			!controlled_indices.iter().all(|val_index| voted_indices.contains(&val_index))
-		} {
-			// If the dispute is new, we participate UNLESS all our controlled
-			// keys have already participated.
-			//
-			// We also block the coordinator while awaiting our determination
-			// of whether the vote is available.
-			let (report_availability, receive_availability) = oneshot::channel();
-			ctx.send_message(DisputeParticipationMessage::Participate {
-				candidate_hash,
-				candidate_receipt,
-				session,
-				n_validators: n_validators as u32,
-				report_availability,
-			})
-			.await;
+					continue
+				},
+				Some(info) => info.validators.clone(),
+			};
 
-			if !receive_availability.await.map_err(Error::Oneshot)? {
-				// If the data is not available, we disregard the dispute votes.
-				// This is an indication that the dispute does not correspond to any included
-				// candidate and that it should be ignored.
-				//
-				// We expect that if the candidate is truly disputed that the higher-level network
-				// code will retry.
+			let n_validators = validators.len();
+			let voted_indices: HashSet<_> = votes.voted_indices().into_iter().collect();
+
+			// Determine if there are any missing local statements for this dispute. Validators are
+			// filtered if:
+			//  1) their statement already exists, or
+			//  2) the validator key is not in the local keystore (i.e. the validator is remote).
+			// The remaining set only contains local validators that are also missing statements.
+			let missing_local_statement = validators
+				.iter()
+				.enumerate()
+				.map(|(index, validator)| (ValidatorIndex(index as _), validator))
+				.any(|(index, validator)| {
+					!voted_indices.contains(&index) &&
+						self.keystore
+							.key_pair::<ValidatorPair>(validator)
+							.ok()
+							.map_or(false, |v| v.is_some())
+				});
+
+			let candidate_comparator = ordering_provider
+				.candidate_comparator(ctx.sender(), &votes.candidate_receipt)
+				.await?;
+			let is_included = candidate_comparator.is_some();
 
-				tracing::debug!(
-					target: LOG_TARGET,
-					"Recovering availability failed - invalid import."
-				);
-				return Ok(ImportStatementsResult::InvalidImport)
+			if !status.is_confirmed_concluded() && !is_included {
+				unconfirmed_disputes.insert((session, *candidate_hash), voted_indices);
 			}
-			metrics.on_open();
 
-			if concluded_valid {
-				metrics.on_concluded_valid();
-			}
-			if concluded_invalid {
-				metrics.on_concluded_invalid();
+			// Participate for all non-concluded disputes which do not have a
+			// recorded local statement.
+			if missing_local_statement {
+				participation_requests.push((
+					candidate_comparator,
+					ParticipationRequest::new(
+						votes.candidate_receipt.clone(),
+						session,
+						n_validators,
+					),
+				));
 			}
 		}
 
-		// Only write when updated and vote is available.
-		overlay_db.write_recent_disputes(recent_disputes);
+		Ok((
+			participation_requests,
+			SpamSlots::recover_from_state(unconfirmed_disputes),
+			ordering_provider,
+		))
 	}
-
-	overlay_db.write_candidate_votes(session, candidate_hash, votes.into());
-
-	Ok(ImportStatementsResult::ValidImport)
 }
 
-fn find_controlled_validator_indices(
-	keystore: &LocalKeystore,
-	validators: &[ValidatorId],
-) -> HashSet<ValidatorIndex> {
-	let mut controlled = HashSet::new();
-	for (index, validator) in validators.iter().enumerate() {
-		if keystore.key_pair::<ValidatorPair>(validator).ok().flatten().is_none() {
-			continue
-		}
-
-		controlled.insert(ValidatorIndex(index as _));
+/// Wait for `ActiveLeavesUpdate` on startup; returns `None` if the `Conclude` signal came first.
+async fn get_rolling_session_window<Context>(
+	ctx: &mut Context,
+) -> Result<Option<(ActivatedLeaf, RollingSessionWindow)>>
+where
+	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+	Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
+{
+	if let Some(leaf) = wait_for_first_leaf(ctx).await? {
+		Ok(Some((
+			leaf.clone(),
+			RollingSessionWindow::new(ctx, DISPUTE_WINDOW, leaf.hash)
+				.await
+				.map_err(NonFatal::RollingSessionWindow)?,
+		)))
+	} else {
+		Ok(None)
 	}
-
-	controlled
 }
 
-async fn issue_local_statement(
-	ctx: &mut impl SubsystemContext,
-	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
-	state: &mut State,
-	candidate_hash: CandidateHash,
-	candidate_receipt: CandidateReceipt,
-	session: SessionIndex,
-	valid: bool,
-	now: Timestamp,
-	metrics: &Metrics,
-) -> Result<(), Error> {
-	// Load session info.
-	let info = match state.rolling_session_window.session_info(session) {
-		None => {
-			tracing::warn!(
-				target: LOG_TARGET,
-				session,
-				"Missing info for session which has an active dispute",
-			);
-
-			return Ok(())
-		},
-		Some(info) => info,
-	};
-
-	let validators = info.validators.clone();
-
-	let votes = overlay_db
-		.load_candidate_votes(session, &candidate_hash)?
-		.map(CandidateVotes::from)
-		.unwrap_or_else(|| CandidateVotes {
-			candidate_receipt: candidate_receipt.clone(),
-			valid: Vec::new(),
-			invalid: Vec::new(),
-		});
-
-	// Sign a statement for each validator index we control which has
-	// not already voted. This should generally be maximum 1 statement.
-	let voted_indices = votes.voted_indices();
-	let mut statements = Vec::new();
-
-	let voted_indices: HashSet<_> = voted_indices.into_iter().collect();
-	let controlled_indices = find_controlled_validator_indices(&state.keystore, &validators[..]);
-	for index in controlled_indices {
-		if voted_indices.contains(&index) {
-			continue
-		}
-
-		let keystore = state.keystore.clone() as Arc<_>;
-		let res = SignedDisputeStatement::sign_explicit(
-			&keystore,
-			valid,
-			candidate_hash,
-			session,
-			validators[index.0 as usize].clone(),
-		)
-		.await;
-
-		match res {
-			Ok(Some(signed_dispute_statement)) => {
-				statements.push((signed_dispute_statement, index));
-			},
-			Ok(None) => {},
-			Err(e) => {
-				tracing::error!(
-					target: LOG_TARGET,
-					err = ?e,
-					"Encountered keystore error while signing dispute statement",
-				);
-			},
-		}
-	}
-
-	// Get our message out:
-	for (statement, index) in &statements {
-		let dispute_message = match make_dispute_message(info, &votes, statement.clone(), *index) {
-			Err(err) => {
-				tracing::debug!(target: LOG_TARGET, ?err, "Creating dispute message failed.");
-				continue
-			},
-			Ok(dispute_message) => dispute_message,
-		};
-
-		ctx.send_message(DisputeDistributionMessage::SendDispute(dispute_message)).await;
-	}
-
-	// Do import
-	if !statements.is_empty() {
-		match handle_import_statements(
-			ctx,
-			overlay_db,
-			state,
-			candidate_hash,
-			MaybeCandidateReceipt::Provides(candidate_receipt),
-			session,
-			statements,
-			now,
-			metrics,
-		)
-		.await
-		{
-			Err(_) => {
-				tracing::error!(
-					target: LOG_TARGET,
-					?candidate_hash,
-					?session,
-					"pending confirmation receiver got dropped by `handle_import_statements` for our own votes!"
-				);
-			},
-			Ok(ImportStatementsResult::InvalidImport) => {
-				tracing::error!(
-					target: LOG_TARGET,
-					?candidate_hash,
-					?session,
-					"`handle_import_statements` considers our own votes invalid!"
-				);
+/// Wait for `ActiveLeavesUpdate`; returns `None` if the `Conclude` signal came first.
+async fn wait_for_first_leaf<Context>(ctx: &mut Context) -> Result<Option<ActivatedLeaf>>
+where
+	Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
+	Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
+{
+	loop {
+		match ctx.recv().await? {
+			FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(None),
+			FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
+				if let Some(activated) = update.activated {
+					return Ok(Some(activated))
+				}
 			},
-			Ok(ImportStatementsResult::ValidImport) => {
-				tracing::trace!(
+			FromOverseer::Signal(OverseerSignal::BlockFinalized(_, _)) => {},
+			FromOverseer::Communication { msg } =>
+			// Note: It seems we really should not receive any messages before the first
+			// `ActiveLeavesUpdate`. If that proves wrong over time and we do receive
+			// messages before the first `ActiveLeavesUpdate` that should not be dropped,
+			// this can easily be fixed by collecting those messages and passing them on to
+			// `Initialized::new()`.
+				tracing::warn!(
 					target: LOG_TARGET,
-					?candidate_hash,
-					?session,
-					"`handle_import_statements` successfully imported our vote!"
-				);
-			},
-		}
-	}
-
-	Ok(())
-}
-
-#[derive(Debug, thiserror::Error)]
-enum DisputeMessageCreationError {
-	#[error("There was no opposite vote available")]
-	NoOppositeVote,
-	#[error("Found vote had an invalid validator index that could not be found")]
-	InvalidValidatorIndex,
-	#[error("Statement found in votes had invalid signature.")]
-	InvalidStoredStatement,
-	#[error(transparent)]
-	InvalidStatementCombination(DisputeMessageCheckError),
-}
-
-fn make_dispute_message(
-	info: &SessionInfo,
-	votes: &CandidateVotes,
-	our_vote: SignedDisputeStatement,
-	our_index: ValidatorIndex,
-) -> Result<DisputeMessage, DisputeMessageCreationError> {
-	let validators = &info.validators;
-
-	let (valid_statement, valid_index, invalid_statement, invalid_index) =
-		if let DisputeStatement::Valid(_) = our_vote.statement() {
-			let (statement_kind, validator_index, validator_signature) =
-				votes.invalid.get(0).ok_or(DisputeMessageCreationError::NoOppositeVote)?.clone();
-			let other_vote = SignedDisputeStatement::new_checked(
-				DisputeStatement::Invalid(statement_kind),
-				our_vote.candidate_hash().clone(),
-				our_vote.session_index(),
-				validators
-					.get(validator_index.0 as usize)
-					.ok_or(DisputeMessageCreationError::InvalidValidatorIndex)?
-					.clone(),
-				validator_signature,
-			)
-			.map_err(|()| DisputeMessageCreationError::InvalidStoredStatement)?;
-			(our_vote, our_index, other_vote, validator_index)
-		} else {
-			let (statement_kind, validator_index, validator_signature) =
-				votes.valid.get(0).ok_or(DisputeMessageCreationError::NoOppositeVote)?.clone();
-			let other_vote = SignedDisputeStatement::new_checked(
-				DisputeStatement::Valid(statement_kind),
-				our_vote.candidate_hash().clone(),
-				our_vote.session_index(),
-				validators
-					.get(validator_index.0 as usize)
-					.ok_or(DisputeMessageCreationError::InvalidValidatorIndex)?
-					.clone(),
-				validator_signature,
-			)
-			.map_err(|()| DisputeMessageCreationError::InvalidStoredStatement)?;
-			(other_vote, validator_index, our_vote, our_index)
-		};
-
-	DisputeMessage::from_signed_statements(
-		valid_statement,
-		valid_index,
-		invalid_statement,
-		invalid_index,
-		votes.candidate_receipt.clone(),
-		info,
-	)
-	.map_err(DisputeMessageCreationError::InvalidStatementCombination)
-}
-
-/// Determine the the best block and its block number.
-/// Assumes `block_descriptions` are sorted from the one
-/// with the lowest `BlockNumber` to the highest.
-fn determine_undisputed_chain(
-	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
-	base_number: BlockNumber,
-	base_hash: Hash,
-	block_descriptions: Vec<BlockDescription>,
-) -> Result<(BlockNumber, Hash), Error> {
-	let last = block_descriptions
-		.last()
-		.map(|e| (base_number + block_descriptions.len() as BlockNumber, e.block_hash))
-		.unwrap_or((base_number, base_hash));
-
-	// Fast path for no disputes.
-	let recent_disputes = match overlay_db.load_recent_disputes()? {
-		None => return Ok(last),
-		Some(a) if a.is_empty() => return Ok(last),
-		Some(a) => a,
-	};
-
-	let is_possibly_invalid = |session, candidate_hash| {
-		recent_disputes
-			.get(&(session, candidate_hash))
-			.map_or(false, |status| status.is_possibly_invalid())
-	};
-
-	for (i, BlockDescription { session, candidates, .. }) in block_descriptions.iter().enumerate() {
-		if candidates.iter().any(|c| is_possibly_invalid(*session, *c)) {
-			if i == 0 {
-				return Ok((base_number, base_hash))
-			} else {
-				return Ok((base_number + i as BlockNumber, block_descriptions[i - 1].block_hash))
-			}
+					?msg,
+					"Received msg before first active leaves update. This is not expected - message will be dropped."
+				),
 		}
 	}
-
-	Ok(last)
 }
diff --git a/polkadot/node/core/dispute-coordinator/src/real/ordering/mod.rs b/polkadot/node/core/dispute-coordinator/src/real/ordering/mod.rs
new file mode 100644
index 00000000000..80ddce5039d
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/ordering/mod.rs
@@ -0,0 +1,219 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::{
+	cmp::{Ord, Ordering, PartialOrd},
+	collections::{BTreeMap, HashSet},
+};
+
+use futures::channel::oneshot;
+
+use polkadot_node_subsystem::{
+	messages::ChainApiMessage, ActivatedLeaf, ActiveLeavesUpdate, SubsystemSender,
+};
+use polkadot_node_subsystem_util::runtime::get_candidate_events;
+use polkadot_primitives::v1::{BlockNumber, CandidateEvent, CandidateHash, CandidateReceipt, Hash};
+
+use super::{
+	error::{Fatal, FatalResult, Result},
+	LOG_TARGET,
+};
+
+#[cfg(test)]
+mod tests;
+
+/// Provider of `CandidateComparator` for candidates.
+pub struct OrderingProvider {
+	/// All candidates we have seen included, which have not yet been finalized.
+	included_candidates: HashSet<CandidateHash>,
+	/// including block -> `CandidateHash`
+	///
+	/// We need this to clean up `included_candidates` on `ActiveLeavesUpdate`.
+	candidates_by_block_number: BTreeMap<BlockNumber, HashSet<CandidateHash>>,
+}
+
+/// `Comparator` for ordering of disputes for candidates.
+///
+/// This `comparator` makes it possible to order disputes based on age and to ensure some fairness
+/// between chains in case of equally old disputes.
+///
+/// Objective ordering between nodes is important in case of lots of disputes, so nodes will pull
+/// in the same direction and work on resolving the same disputes first. This ensures that we will
+/// conclude some disputes, even if there are lots of them. While any objective ordering would
+/// suffice for this goal, ordering by age ensures we are not only resolving disputes, but also
+/// resolving the oldest ones first, which are the most urgent and important ones to resolve.
+///
+/// Note that by `oldest` we mean oldest in terms of relay chain block number, for any block
+/// number that has not yet been finalized. If a block has been finalized already it should be
+/// treated as low priority when it comes to disputes, as even in the case of a negative outcome,
+/// we are already too late. The ordering mechanism here serves to prevent this from happening in
+/// the first place.
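+///
+/// A small sketch of the resulting order, using the test-only `new_dummy` constructor:
+///
+/// ```ignore
+/// let older = CandidateComparator::new_dummy(1, candidate_a);
+/// let newer = CandidateComparator::new_dummy(2, candidate_b);
+/// assert!(older < newer); // disputes with a lower relay parent block number come first
+/// ```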
+#[derive(Copy, Clone)]
+pub struct CandidateComparator {
+	/// Block number of the relay parent.
+	///
+	/// Important, so we will be participating in oldest disputes first.
+	///
+	/// Note: In theory it would make more sense to use the `BlockNumber` of the including
+	/// block, as inclusion time is the actual relevant event when it comes to ordering. The
+	/// problem is that a candidate can get included multiple times on forks, so the `BlockNumber`
+	/// of the including block is not unique. We could theoretically work around that problem by
+	/// just using the lowest `BlockNumber` of all available including blocks - but that is not
+	/// stable. If a new fork appears after the fact, we would start ordering the same candidate
+	/// differently, which would result in the same candidate getting queued twice.
+	relay_parent_block_number: BlockNumber,
+	/// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates.
+	candidate_hash: CandidateHash,
+}
+
+impl PartialEq for CandidateComparator {
+	fn eq(&self, other: &CandidateComparator) -> bool {
+		Ordering::Equal == self.cmp(other)
+	}
+}
+
+impl Eq for CandidateComparator {}
+
+impl PartialOrd for CandidateComparator {
+	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+		Some(self.cmp(other))
+	}
+}
+
+impl Ord for CandidateComparator {
+	fn cmp(&self, other: &Self) -> Ordering {
+		match self.relay_parent_block_number.cmp(&other.relay_parent_block_number) {
+			Ordering::Equal => (),
+			o => return o,
+		}
+		self.candidate_hash.cmp(&other.candidate_hash)
+	}
+}
+
+impl CandidateComparator {
+	/// Create a candidate comparator based on given (fake) values.
+	///
+	/// Useful for testing.
+	#[cfg(test)]
+	pub fn new_dummy(block_number: BlockNumber, candidate_hash: CandidateHash) -> Self {
+		Self { relay_parent_block_number: block_number, candidate_hash }
+	}
+
+	/// Check whether the given candidate hash belongs to this comparator.
+	pub fn matches_candidate(&self, candidate_hash: &CandidateHash) -> bool {
+		&self.candidate_hash == candidate_hash
+	}
+}
+
+impl OrderingProvider {
+	/// Create a properly initialized `OrderingProvider`.
+	pub async fn new<Sender: SubsystemSender>(
+		sender: &mut Sender,
+		initial_head: ActivatedLeaf,
+	) -> Result<Self> {
+		let mut s = Self {
+			included_candidates: HashSet::new(),
+			candidates_by_block_number: BTreeMap::new(),
+		};
+		let update =
+			ActiveLeavesUpdate { activated: Some(initial_head), deactivated: Default::default() };
+		s.process_active_leaves_update(sender, &update).await?;
+		Ok(s)
+	}
+
+	/// Retrieve a candidate `comparator` if available.
+	///
+	/// If not available, disputes concerning this candidate can be treated as low priority and
+	/// spam slots should be used for such disputes.
+	pub async fn candidate_comparator<'a>(
+		&mut self,
+		sender: &mut impl SubsystemSender,
+		candidate: &CandidateReceipt,
+	) -> FatalResult<Option<CandidateComparator>> {
+		let candidate_hash = candidate.hash();
+		if !self.included_candidates.contains(&candidate_hash) {
+			return Ok(None)
+		}
+		let n = match get_block_number(sender, candidate.descriptor().relay_parent).await? {
+			None => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					candidate_hash = ?candidate.hash(),
+					"Candidate's relay_parent could not be found via chain API, but we saw candidate included?!"
+				);
+				return Ok(None)
+			},
+			Some(n) => n,
+		};
+
+		Ok(Some(CandidateComparator { relay_parent_block_number: n, candidate_hash }))
+	}
+
+	/// Query active leaves for any `CandidateEvent::CandidateIncluded` events.
+	///
+	/// Also updates current heads, so we can query candidates for all non-finalized blocks.
+	pub async fn process_active_leaves_update<Sender: SubsystemSender>(
+		&mut self,
+		sender: &mut Sender,
+		update: &ActiveLeavesUpdate,
+	) -> Result<()> {
+		if let Some(activated) = update.activated.as_ref() {
+			// Get included events:
+			let included = get_candidate_events(sender, activated.hash)
+				.await?
+				.into_iter()
+				.filter_map(|ev| match ev {
+					CandidateEvent::CandidateIncluded(receipt, _, _, _) => Some(receipt),
+					_ => None,
+				});
+			for receipt in included {
+				let candidate_hash = receipt.hash();
+				self.included_candidates.insert(candidate_hash);
+				self.candidates_by_block_number
+					.entry(activated.number)
+					.or_default()
+					.insert(candidate_hash);
+			}
+		}
+
+		Ok(())
+	}
+
+	/// Prune finalized candidates.
+	///
+	/// Once a candidate lives in a relay chain block that is behind the finalized chain (or got
+	/// finalized itself), we can treat it as low priority.
+	pub fn process_finalized_block(&mut self, finalized: &BlockNumber) {
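+		// `split_off` returns all entries with block number >= `finalized`, leaving the
+		// strictly smaller block numbers behind; we keep the former and prune the latter: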
+		let not_finalized = self.candidates_by_block_number.split_off(finalized);
+		let finalized = std::mem::take(&mut self.candidates_by_block_number);
+		self.candidates_by_block_number = not_finalized;
+		// Clean up finalized:
+		for finalized_candidate in finalized.into_values().flatten() {
+			self.included_candidates.remove(&finalized_candidate);
+		}
+	}
+}
+
+async fn get_block_number(
+	sender: &mut impl SubsystemSender,
+	relay_parent: Hash,
+) -> FatalResult<Option<BlockNumber>> {
+	let (tx, rx) = oneshot::channel();
+	sender.send_message(ChainApiMessage::BlockNumber(relay_parent, tx).into()).await;
+
+	rx.await
+		.map_err(|_| Fatal::CanceledBlockNumber)?
+		.map_err(Fatal::ChainApiBlockNumber)
+}
diff --git a/polkadot/node/core/dispute-coordinator/src/real/ordering/tests.rs b/polkadot/node/core/dispute-coordinator/src/real/ordering/tests.rs
new file mode 100644
index 00000000000..50335f22e57
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/ordering/tests.rs
@@ -0,0 +1,165 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::Arc;
+
+use assert_matches::assert_matches;
+
+use futures::FutureExt;
+use parity_scale_codec::Encode;
+use sp_core::testing::TaskExecutor;
+
+use polkadot_node_subsystem::{
+	jaeger,
+	messages::{
+		AllMessages, ChainApiMessage, DisputeCoordinatorMessage, RuntimeApiMessage,
+		RuntimeApiRequest,
+	},
+	ActivatedLeaf, ActiveLeavesUpdate, LeafStatus,
+};
+use polkadot_node_subsystem_test_helpers::{
+	make_subsystem_context, TestSubsystemContext, TestSubsystemContextHandle,
+};
+use polkadot_node_subsystem_util::reexports::SubsystemContext;
+use polkadot_primitives::v1::{
+	BlakeTwo256, BlockNumber, CandidateEvent, CandidateReceipt, CoreIndex, GroupIndex, Hash, HashT,
+	HeadData,
+};
+
+use super::OrderingProvider;
+
+type VirtualOverseer = TestSubsystemContextHandle<DisputeCoordinatorMessage>;
+
+struct TestState {
+	next_block_number: BlockNumber,
+	ordering: OrderingProvider,
+	ctx: TestSubsystemContext<DisputeCoordinatorMessage, TaskExecutor>,
+}
+
+impl TestState {
+	async fn new() -> Self {
+		let (mut ctx, ctx_handle) = make_subsystem_context(TaskExecutor::new());
+		let leaf = get_activated_leaf(1);
+		launch_virtual_overseer(&mut ctx, ctx_handle);
+		Self {
+			next_block_number: 2,
+			ordering: OrderingProvider::new(ctx.sender(), leaf).await.unwrap(),
+			ctx,
+		}
+	}
+
+	/// Get a new leaf.
+	fn next_leaf(&mut self) -> ActivatedLeaf {
+		let r = get_activated_leaf(self.next_block_number);
+		self.next_block_number += 1;
+		r
+	}
+
+	async fn process_active_leaves_update(&mut self) {
+		let update = self.next_leaf();
+		self.ordering
+			.process_active_leaves_update(
+				self.ctx.sender(),
+				&ActiveLeavesUpdate::start_work(update),
+			)
+			.await
+			.unwrap();
+	}
+}
+
+/// Simulate other subsystems:
+fn launch_virtual_overseer(ctx: &mut impl SubsystemContext, ctx_handle: VirtualOverseer) {
+	ctx.spawn(
+		"serve-active-leaves-update",
+		async move { virtual_overseer(ctx_handle).await }.boxed(),
+	)
+	.unwrap();
+}
+
+async fn virtual_overseer(mut ctx_handle: VirtualOverseer) {
+	let ev = vec![CandidateEvent::CandidateIncluded(
+		CandidateReceipt::default(),
+		HeadData::default(),
+		CoreIndex::from(0),
+		GroupIndex::from(0),
+	)];
+
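+	// The first `CandidateEvents` request stems from `OrderingProvider::new` on the initial
+	// leaf - answer it with no events: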
+	assert_matches!(
+		ctx_handle.recv().await,
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+			_,
+			RuntimeApiRequest::CandidateEvents(tx),
+		)) => {
+			tx.send(Ok(Vec::new())).unwrap();
+		}
+	);
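+	// The second request is triggered by the test's active leaves update - now report the
+	// candidate as included: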
+	assert_matches!(
+		ctx_handle.recv().await,
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+			_,
+			RuntimeApiRequest::CandidateEvents(tx),
+		)) => {
+			tx.send(Ok(ev)).unwrap();
+		}
+	);
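+	// Finally serve the block number of the candidate's relay parent: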
+	assert_matches!(
+		ctx_handle.recv().await,
+		AllMessages::ChainApi(ChainApiMessage::BlockNumber(_, tx)) => {
+			tx.send(Ok(Some(1))).unwrap();
+		}
+	);
+}
+
+/// Get a dummy `ActivatedLeaf` for a given block number.
+fn get_activated_leaf(n: BlockNumber) -> ActivatedLeaf {
+	ActivatedLeaf {
+		hash: get_block_number_hash(n),
+		number: n,
+		status: LeafStatus::Fresh,
+		span: Arc::new(jaeger::Span::Disabled),
+	}
+}
+
+/// Get a dummy relay parent hash for a given dummy block number.
+fn get_block_number_hash(n: BlockNumber) -> Hash {
+	BlakeTwo256::hash(&n.encode())
+}
+
+#[test]
+fn ordering_provider_provides_ordering_when_initialized() {
+	futures::executor::block_on(async {
+		let mut state = TestState::new().await;
+		let r = state
+			.ordering
+			.candidate_comparator(state.ctx.sender(), &CandidateReceipt::default())
+			.await
+			.unwrap();
+		assert!(r.is_none());
+		// After next active leaves update we should have a comparator:
+		state.process_active_leaves_update().await;
+		let r = state
+			.ordering
+			.candidate_comparator(state.ctx.sender(), &CandidateReceipt::default())
+			.await
+			.unwrap();
+		assert!(r.is_some());
+		assert_eq!(r.unwrap().relay_parent_block_number, 1);
+	});
+}
diff --git a/polkadot/node/core/dispute-coordinator/src/real/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/real/participation/mod.rs
new file mode 100644
index 00000000000..67ad4af7e2d
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/participation/mod.rs
@@ -0,0 +1,438 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::HashSet;
+
+use futures::{
+	channel::{mpsc, oneshot},
+	FutureExt, SinkExt,
+};
+
+use polkadot_node_primitives::{ValidationResult, APPROVAL_EXECUTION_TIMEOUT};
+use polkadot_node_subsystem::{
+	messages::{AvailabilityRecoveryMessage, AvailabilityStoreMessage, CandidateValidationMessage},
+	ActiveLeavesUpdate, RecoveryError, SubsystemContext, SubsystemSender,
+};
+use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash;
+use polkadot_primitives::v1::{BlockNumber, CandidateHash, CandidateReceipt, Hash, SessionIndex};
+
+use crate::real::LOG_TARGET;
+
+use super::{
+	error::{Fatal, FatalResult, NonFatal, Result},
+	ordering::CandidateComparator,
+};
+
+#[cfg(test)]
+mod tests;
+#[cfg(test)]
+pub use tests::{participation_full_happy_path, participation_missing_availability};
+
+mod queues;
+use queues::Queues;
+pub use queues::{Error as QueueError, ParticipationRequest};
+
+/// The maximum number of participation processes we want to run in parallel.
+///
+/// This should be a relatively low value: while multi-core architectures might give us a speedup
+/// once we have fetched the data, the fetching itself cannot be improved by issuing requests in
+/// parallel. Higher numbers therefore make it harder for a single dispute to resolve fast.
+const MAX_PARALLEL_PARTICIPATIONS: usize = 3;
+
+/// Keep track of disputes we need to participate in.
+///
+/// - Prioritize and queue participations.
+/// - Dequeue participation requests in order and launch participation workers.
+pub struct Participation {
+	/// Participations currently being processed.
+	running_participations: HashSet<CandidateHash>,
+	/// Priority and best effort queues.
+	queue: Queues,
+	/// Sender to be passed to worker tasks.
+	worker_sender: WorkerMessageSender,
+	/// Some recent block for retrieving validation code from chain.
+	recent_block: Option<(BlockNumber, Hash)>,
+}
+
+/// Message from worker tasks.
+#[derive(Debug)]
+pub struct WorkerMessage(ParticipationStatement);
+
+/// Sender used by worker tasks.
+pub type WorkerMessageSender = mpsc::Sender<WorkerMessage>;
+
+/// Receiver to receive messages from worker tasks.
+pub type WorkerMessageReceiver = mpsc::Receiver<WorkerMessage>;
+
+/// Statement as result of the validation process.
+#[derive(Debug)]
+pub struct ParticipationStatement {
+	/// Relevant session.
+	pub session: SessionIndex,
+	/// The candidate the worker has been spawned for.
+	pub candidate_hash: CandidateHash,
+	/// Used receipt.
+	pub candidate_receipt: CandidateReceipt,
+	/// Actual result.
+	pub outcome: ParticipationOutcome,
+}
+
+/// Outcome of the validation process.
+#[derive(Copy, Clone, Debug)]
+pub enum ParticipationOutcome {
+	/// Candidate was found to be valid.
+	Valid,
+	/// Candidate was found to be invalid.
+	Invalid,
+	/// Candidate was found to be unavailable.
+	Unavailable,
+	/// Something went wrong (bug), details can be found in the logs.
+	Error,
+}
+
+impl ParticipationOutcome {
+	/// If validation was successful, get whether the candidate was valid or invalid.
+	pub fn validity(self) -> Option<bool> {
+		match self {
+			Self::Valid => Some(true),
+			Self::Invalid => Some(false),
+			Self::Unavailable | Self::Error => None,
+		}
+	}
+}
+
+impl WorkerMessage {
+	fn from_request(req: ParticipationRequest, outcome: ParticipationOutcome) -> Self {
+		let session = req.session();
+		let (candidate_hash, candidate_receipt) = req.into_candidate_info();
+		Self(ParticipationStatement { session, candidate_hash, candidate_receipt, outcome })
+	}
+}
+
+impl Participation {
+	/// Get ready for managing dispute participation requests.
+	///
+	/// The passed in sender will be used by background workers to communicate back their results.
+	/// The calling context should make sure to call `Participation::get_participation_result()`
+	/// for the received messages.
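+	///
+	/// A minimal setup sketch, mirroring what the tests in this module do (`ctx` stands in
+	/// for the subsystem context):
+	///
+	/// ```ignore
+	/// let (sender, mut worker_receiver) = mpsc::channel(1);
+	/// let mut participation = Participation::new(sender);
+	/// // ... queue participations via `queue_participation()`; once a worker reports back:
+	/// let msg = worker_receiver.next().await.unwrap();
+	/// let statement = participation.get_participation_result(&mut ctx, msg).await?;
+	/// ```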
+	pub fn new(sender: WorkerMessageSender) -> Self {
+		Self {
+			running_participations: HashSet::new(),
+			queue: Queues::new(),
+			worker_sender: sender,
+			recent_block: None,
+		}
+	}
+
+	/// Queue a dispute for the node to participate in.
+	///
+	/// If capacity is available right now and we already got some relay chain head via
+	/// `on_active_leaves_update`, the participation will be launched right away.
+	///
+	/// Returns an error if the queues are already full.
+	pub async fn queue_participation<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		comparator: Option<CandidateComparator>,
+		req: ParticipationRequest,
+	) -> Result<()> {
+		// Participation already running - we can ignore that request:
+		if self.running_participations.contains(req.candidate_hash()) {
+			return Ok(())
+		}
+		// Available capacity - participate right away (if we already have a recent block):
+		if let Some((_, h)) = self.recent_block {
+			if self.running_participations.len() < MAX_PARALLEL_PARTICIPATIONS {
+				self.fork_participation(ctx, req, h)?;
+				return Ok(())
+			}
+		}
+		// Out of capacity/no recent block yet - queue:
+		Ok(self.queue.queue(comparator, req).map_err(NonFatal::QueueError)?)
+	}
+
+	/// Message from a worker task was received - get the outcome.
+	///
+	/// Call this function to keep participations going and to receive `ParticipationStatement`s.
+	///
+	/// This function has to be called for each received worker message, in order to make sure
+	/// enough participation processes are running at any given time.
+	///
+	/// Returns: The received `ParticipationStatement` or a fatal error, in case
+	/// something went wrong when dequeuing more requests (tasks could not be spawned).
+	pub async fn get_participation_result<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		msg: WorkerMessage,
+	) -> FatalResult<ParticipationStatement> {
+		let WorkerMessage(statement) = msg;
+		self.running_participations.remove(&statement.candidate_hash);
+		let recent_block = self.recent_block.expect("We never ever reset recent_block to `None` and we already received a result, so it must have been set before. qed.");
+		self.dequeue_until_capacity(ctx, recent_block.1).await?;
+		Ok(statement)
+	}
+
+	/// Process active leaves update.
+	///
+	/// Make sure to dequeue participations if that became possible and update the most recent
+	/// block.
+	pub async fn process_active_leaves_update<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		update: &ActiveLeavesUpdate,
+	) -> FatalResult<()> {
+		if let Some(activated) = &update.activated {
+			match self.recent_block {
+				None => {
+					self.recent_block = Some((activated.number, activated.hash));
+					// Work got potentially unblocked:
+					self.dequeue_until_capacity(ctx, activated.hash).await?;
+				},
+				Some((number, _)) if activated.number > number => {
+					self.recent_block = Some((activated.number, activated.hash));
+				},
+				Some(_) => {},
+			}
+		}
+		Ok(())
+	}
+
+	/// Dequeue until `MAX_PARALLEL_PARTICIPATIONS` is reached.
+	async fn dequeue_until_capacity<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		recent_head: Hash,
+	) -> FatalResult<()> {
+		while self.running_participations.len() < MAX_PARALLEL_PARTICIPATIONS {
+			if let Some(req) = self.queue.dequeue() {
+				self.fork_participation(ctx, req, recent_head)?;
+			} else {
+				break
+			}
+		}
+		Ok(())
+	}
+
+	/// Fork a participation task in the background.
+	fn fork_participation<Context: SubsystemContext>(
+		&mut self,
+		ctx: &mut Context,
+		req: ParticipationRequest,
+		recent_head: Hash,
+	) -> FatalResult<()> {
+		if self.running_participations.insert(req.candidate_hash().clone()) {
+			let sender = ctx.sender().clone();
+			ctx.spawn(
+				"participation-worker",
+				participate(self.worker_sender.clone(), sender, recent_head, req).boxed(),
+			)
+			.map_err(Fatal::SpawnFailed)?;
+		}
+		Ok(())
+	}
+}
+
+async fn participate(
+	mut result_sender: WorkerMessageSender,
+	mut sender: impl SubsystemSender,
+	block_hash: Hash,
+	req: ParticipationRequest,
+) {
+	// in order to validate a candidate we need to start by recovering the
+	// available data
+	let (recover_available_data_tx, recover_available_data_rx) = oneshot::channel();
+	sender
+		.send_message(
+			AvailabilityRecoveryMessage::RecoverAvailableData(
+				req.candidate_receipt().clone(),
+				req.session(),
+				None,
+				recover_available_data_tx,
+			)
+			.into(),
+		)
+		.await;
+
+	let available_data = match recover_available_data_rx.await {
+		Err(oneshot::Canceled) => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"`Oneshot` got cancelled when recovering available data {:?}",
+				req.candidate_hash(),
+			);
+			send_result(&mut result_sender, req, ParticipationOutcome::Error).await;
+			return
+		},
+		Ok(Ok(data)) => data,
+		Ok(Err(RecoveryError::Invalid)) => {
+			// the available data was recovered but it is invalid, therefore we'll
+			// vote negatively for the candidate dispute
+			send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await;
+			return
+		},
+		Ok(Err(RecoveryError::Unavailable)) => {
+			send_result(&mut result_sender, req, ParticipationOutcome::Unavailable).await;
+			return
+		},
+	};
+
+	// we also need to fetch the validation code which we can reference by its
+	// hash as taken from the candidate descriptor
+	let validation_code = match get_validation_code_by_hash(
+		&mut sender,
+		block_hash,
+		req.candidate_receipt().descriptor.validation_code_hash,
+	)
+	.await
+	{
+		Ok(Some(code)) => code,
+		Ok(None) => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Validation code unavailable for code hash {:?} in the state of block {:?}",
+				req.candidate_receipt().descriptor.validation_code_hash,
+				block_hash,
+			);
+
+			send_result(&mut result_sender, req, ParticipationOutcome::Error).await;
+			return
+		},
+		Err(err) => {
+			tracing::warn!(target: LOG_TARGET, ?err, "Error when fetching validation code.");
+			send_result(&mut result_sender, req, ParticipationOutcome::Error).await;
+			return
+		},
+	};
+
+	// we dispatch a request to store the available data for the candidate. We
+	// want to maximize data availability for other potential checkers involved
+	// in the dispute
+	let (store_available_data_tx, store_available_data_rx) = oneshot::channel();
+	sender
+		.send_message(
+			AvailabilityStoreMessage::StoreAvailableData {
+				candidate_hash: *req.candidate_hash(),
+				n_validators: req.n_validators() as u32,
+				available_data: available_data.clone(),
+				tx: store_available_data_tx,
+			}
+			.into(),
+		)
+		.await;
+
+	match store_available_data_rx.await {
+		Err(oneshot::Canceled) => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"`Oneshot` got cancelled when storing available data {:?}",
+				req.candidate_hash(),
+			);
+		},
+		Ok(Err(err)) => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				?err,
+				"Failed to store available data for candidate {:?}",
+				req.candidate_hash(),
+			);
+		},
+		Ok(Ok(())) => {},
+	}
+
+	// Issue a request to validate the candidate with the provided exhaustive
+	// parameters
+	//
+	// We use the approval execution timeout because this is intended to
+	// be run outside of backing and therefore should be subject to the
+	// same level of leeway.
+	let (validation_tx, validation_rx) = oneshot::channel();
+	sender
+		.send_message(
+			CandidateValidationMessage::ValidateFromExhaustive(
+				available_data.validation_data,
+				validation_code,
+				req.candidate_receipt().descriptor.clone(),
+				available_data.pov,
+				APPROVAL_EXECUTION_TIMEOUT,
+				validation_tx,
+			)
+			.into(),
+		)
+		.await;
+
+	// we cast votes (either positive or negative) depending on the outcome of
+	// the validation and if valid, whether the commitments hash matches
+	match validation_rx.await {
+		Err(oneshot::Canceled) => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"`Oneshot` got cancelled when validating candidate {:?}",
+				req.candidate_hash(),
+			);
+			send_result(&mut result_sender, req, ParticipationOutcome::Error).await;
+			return
+		},
+		Ok(Err(err)) => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Candidate {:?} validation failed with: {:?}",
+				req.candidate_hash(),
+				err,
+			);
+
+			send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await;
+		},
+		Ok(Ok(ValidationResult::Invalid(invalid))) => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				"Candidate {:?} considered invalid: {:?}",
+				req.candidate_hash(),
+				invalid,
+			);
+
+			send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await;
+		},
+		Ok(Ok(ValidationResult::Valid(commitments, _))) => {
+			if commitments.hash() != req.candidate_receipt().commitments_hash {
+				tracing::warn!(
+					target: LOG_TARGET,
+					expected = ?req.candidate_receipt().commitments_hash,
+					got = ?commitments.hash(),
+					"Candidate is valid but commitments hash doesn't match",
+				);
+
+				send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await;
+			} else {
+				send_result(&mut result_sender, req, ParticipationOutcome::Valid).await;
+			}
+		},
+	}
+}
+
+/// Helper function for sending the result back and report any error.
+async fn send_result(
+	sender: &mut WorkerMessageSender,
+	req: ParticipationRequest,
+	outcome: ParticipationOutcome,
+) {
+	if let Err(err) = sender.feed(WorkerMessage::from_request(req, outcome)).await {
+		tracing::error!(
+			target: LOG_TARGET,
+			?err,
+			"Sending back participation result failed. Dispute coordinator not working properly!"
+		);
+	}
+}
diff --git a/polkadot/node/core/dispute-coordinator/src/real/participation/queues/mod.rs b/polkadot/node/core/dispute-coordinator/src/real/participation/queues/mod.rs
new file mode 100644
index 00000000000..e1cac851f4b
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/participation/queues/mod.rs
@@ -0,0 +1,210 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{BTreeMap, HashMap};
+
+use thiserror::Error;
+
+use polkadot_primitives::v1::{CandidateHash, CandidateReceipt, SessionIndex};
+
+use crate::real::ordering::CandidateComparator;
+
+#[cfg(test)]
+mod tests;
+
+/// How many potential garbage disputes we want to queue, before starting to drop requests.
+#[cfg(not(test))]
+const BEST_EFFORT_QUEUE_SIZE: usize = 100;
+#[cfg(test)]
+const BEST_EFFORT_QUEUE_SIZE: usize = 3;
+
+/// How many priority disputes can be queued.
+///
+/// Once the queue exceeds that size, we will start to drop the newest participation requests in
+/// the queue. Note that on each vote import the request will be re-added if there is free
+/// capacity. This limit just serves as a safeguard; it is not expected to ever really be reached.
+///
+/// For 100 parachains, this would allow for every single candidate in 100 blocks on two forks
+/// to get disputed (100 * 100 * 2 = 20_000), which should be plenty to deal with any realistic
+/// attack.
+#[cfg(not(test))]
+const PRIORITY_QUEUE_SIZE: usize = 20_000;
+#[cfg(test)]
+const PRIORITY_QUEUE_SIZE: usize = 2;
+
+/// Type for counting how often a candidate was added to the best effort queue.
+type BestEffortCount = u32;
+
+/// Queues for dispute participation.
+pub struct Queues {
+	/// Set of best effort participation requests.
+	///
+	/// Note that as the size is limited to `BEST_EFFORT_QUEUE_SIZE`, we simply do a linear
+	/// search for the entry with the highest `added_count` to determine which dispute to
+	/// participate in next.
+	///
+	/// This mechanism leads to an amplifying effect - the more validators already participated,
+	/// the more likely it becomes that more validators will participate soon, which should lead to
+	/// a quick resolution of disputes, even in the best effort queue.
+	best_effort: HashMap<CandidateHash, BestEffortEntry>,
+
+	/// Priority queue.
+	///
+	/// In the priority queue, we have a strict ordering of candidates and participation will
+	/// happen in that order.
+	priority: BTreeMap<CandidateComparator, ParticipationRequest>,
+}
+
+/// A dispute participation request that can be queued.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct ParticipationRequest {
+	candidate_hash: CandidateHash,
+	candidate_receipt: CandidateReceipt,
+	session: SessionIndex,
+	n_validators: usize,
+}
+
+/// Entry for the best effort queue.
+struct BestEffortEntry {
+	req: ParticipationRequest,
+	/// How often was the above request added to the queue.
+	added_count: BestEffortCount,
+}
+
+/// What can go wrong when queuing a request.
+#[derive(Debug, Error)]
+pub enum Error {
+	#[error("Request could not be queued, because best effort queue was already full.")]
+	BestEffortFull,
+	#[error("Request could not be queued, because priority queue was already full.")]
+	PriorityFull,
+}
+
+impl ParticipationRequest {
+	/// Create a new `ParticipationRequest` to be queued.
+	pub fn new(
+		candidate_receipt: CandidateReceipt,
+		session: SessionIndex,
+		n_validators: usize,
+	) -> Self {
+		Self { candidate_hash: candidate_receipt.hash(), candidate_receipt, session, n_validators }
+	}
+
+	pub fn candidate_receipt(&'_ self) -> &'_ CandidateReceipt {
+		&self.candidate_receipt
+	}
+	pub fn candidate_hash(&'_ self) -> &'_ CandidateHash {
+		&self.candidate_hash
+	}
+	pub fn session(&self) -> SessionIndex {
+		self.session
+	}
+	pub fn n_validators(&self) -> usize {
+		self.n_validators
+	}
+	pub fn into_candidate_info(self) -> (CandidateHash, CandidateReceipt) {
+		let Self { candidate_hash, candidate_receipt, .. } = self;
+		(candidate_hash, candidate_receipt)
+	}
+}
+
+impl Queues {
+	/// Create new `Queues`.
+	pub fn new() -> Self {
+		Self { best_effort: HashMap::new(), priority: BTreeMap::new() }
+	}
+
+	/// Put a request in the queue, either priority or best effort depending on whether a
+	/// `CandidateComparator` was provided or not.
+	///
+	/// If the request was already present in the best effort queue, it will be moved to the
+	/// priority queue if a `CandidateComparator` is passed now; otherwise the `added_count` in
+	/// the best effort queue will be bumped.
+	///
+	/// Returns an error in case the target queue is already full.
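+	///
+	/// An illustrative sketch (`req` and `comparator` stand in for real values):
+	///
+	/// ```ignore
+	/// let mut queues = Queues::new();
+	/// // Without a comparator the request goes to the best effort queue:
+	/// queues.queue(None, req.clone())?;
+	/// // Queuing it again with a comparator moves it over to the priority queue:
+	/// queues.queue(Some(comparator), req.clone())?;
+	/// // Priority entries get dequeued first, in `CandidateComparator` order:
+	/// assert_eq!(queues.dequeue(), Some(req));
+	/// ```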
+	pub fn queue(
+		&mut self,
+		comparator: Option<CandidateComparator>,
+		req: ParticipationRequest,
+	) -> Result<(), Error> {
+		debug_assert!(comparator
+			.map(|c| c.matches_candidate(req.candidate_hash()))
+			.unwrap_or(true));
+
+		if let Some(comparator) = comparator {
+			if self.priority.len() >= PRIORITY_QUEUE_SIZE {
+				return Err(Error::PriorityFull)
+			}
+			// Remove any best effort entry:
+			self.best_effort.remove(&req.candidate_hash);
+			self.priority.insert(comparator, req);
+		} else {
+			if self.best_effort.len() >= BEST_EFFORT_QUEUE_SIZE {
+				return Err(Error::BestEffortFull)
+			}
+			// Note: The request might have been added to priority in a previous call already, we
+			// take care of that case in `dequeue` (more efficient).
+			self.best_effort
+				.entry(req.candidate_hash)
+				.or_insert(BestEffortEntry { req, added_count: 0 })
+				.added_count += 1;
+		}
+		Ok(())
+	}
+
+	/// Get the next best request for dispute participation, if any.
+	///
+	/// The priority queue is always considered first, then the best effort queue based on
+	/// `added_count`.
+	pub fn dequeue(&mut self) -> Option<ParticipationRequest> {
+		if let Some(req) = self.pop_priority() {
+			// In case a candidate became best effort over time, we might have it also queued in
+			// the best effort queue - get rid of any such entry:
+			self.best_effort.remove(req.candidate_hash());
+			return Some(req)
+		}
+		self.pop_best_effort()
+	}
+
+	/// Get the next best from the best effort queue.
+	///
+	/// If there are multiple best entries, just pick one.
+	fn pop_best_effort(&mut self) -> Option<ParticipationRequest> {
+		let best = self.best_effort.iter().reduce(|(hash1, entry1), (hash2, entry2)| {
+			if entry1.added_count > entry2.added_count {
+				(hash1, entry1)
+			} else {
+				(hash2, entry2)
+			}
+		});
+		if let Some((best_hash, _)) = best {
+			let best_hash = best_hash.clone();
+			self.best_effort.remove(&best_hash).map(|e| e.req)
+		} else {
+			None
+		}
+	}
+
+	/// Get best priority queue entry.
+	fn pop_priority(&mut self) -> Option<ParticipationRequest> {
+		// Once https://github.com/rust-lang/rust/issues/62924 is there, we can use a simple:
+		// priority.pop_first().
+		if let Some((comparator, _)) = self.priority.iter().next() {
+			let comparator = comparator.clone();
+			self.priority.remove(&comparator)
+		} else {
+			None
+		}
+	}
+}
diff --git a/polkadot/node/core/dispute-coordinator/src/real/participation/queues/tests.rs b/polkadot/node/core/dispute-coordinator/src/real/participation/queues/tests.rs
new file mode 100644
index 00000000000..7618545a1b5
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/participation/queues/tests.rs
@@ -0,0 +1,131 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use assert_matches::assert_matches;
+use polkadot_primitives::v1::{BlockNumber, CandidateReceipt, Hash};
+
+use crate::real::ordering::CandidateComparator;
+
+use super::{Error, ParticipationRequest, Queues};
+
+/// Make a `ParticipationRequest` based on the given commitments hash.
+fn make_participation_request(hash: Hash) -> ParticipationRequest {
+	let mut receipt = CandidateReceipt::default();
+	// make it differ:
+	receipt.commitments_hash = hash;
+	ParticipationRequest::new(receipt, 1, 100)
+}
+
+/// Make dummy comparator for request, based on the given block number.
+fn make_dummy_comparator(
+	req: &ParticipationRequest,
+	relay_parent: BlockNumber,
+) -> CandidateComparator {
+	CandidateComparator::new_dummy(relay_parent, *req.candidate_hash())
+}
+
+/// Check that dequeuing respects order.
+///
+/// Any priority item will be dequeued before any best effort item; priority items are processed
+/// in order. Best effort items are processed based on how often they have been added.
+#[test]
+fn ordering_works_as_expected() {
+	let mut queue = Queues::new();
+	let req1 = make_participation_request(Hash::repeat_byte(0x01));
+	let req_prio = make_participation_request(Hash::repeat_byte(0x02));
+	let req3 = make_participation_request(Hash::repeat_byte(0x03));
+	let req_prio_2 = make_participation_request(Hash::repeat_byte(0x04));
+	let req5 = make_participation_request(Hash::repeat_byte(0x05));
+	let req_full = make_participation_request(Hash::repeat_byte(0x06));
+	let req_prio_full = make_participation_request(Hash::repeat_byte(0x07));
+	queue.queue(None, req1.clone()).unwrap();
+	queue
+		.queue(Some(make_dummy_comparator(&req_prio, 1)), req_prio.clone())
+		.unwrap();
+	queue.queue(None, req3.clone()).unwrap();
+	queue
+		.queue(Some(make_dummy_comparator(&req_prio_2, 2)), req_prio_2.clone())
+		.unwrap();
+	queue.queue(None, req3.clone()).unwrap();
+	queue.queue(None, req5.clone()).unwrap();
+	assert_matches!(
+		queue.queue(Some(make_dummy_comparator(&req_prio_full, 3)), req_prio_full),
+		Err(Error::PriorityFull)
+	);
+	assert_matches!(queue.queue(None, req_full), Err(Error::BestEffortFull));
+
+	assert_eq!(queue.dequeue(), Some(req_prio));
+	assert_eq!(queue.dequeue(), Some(req_prio_2));
+	assert_eq!(queue.dequeue(), Some(req3));
+	assert_matches!(
+		queue.dequeue(),
+		Some(r) => { assert!(r == req1 || r == req5) }
+	);
+	assert_matches!(
+		queue.dequeue(),
+		Some(r) => { assert!(r == req1 || r == req5) }
+	);
+	assert_matches!(queue.dequeue(), None);
+}
+
+/// No matter how often a candidate gets queued, it should only ever get dequeued once.
+#[test]
+fn candidate_is_only_dequeued_once() {
+	let mut queue = Queues::new();
+	let req1 = make_participation_request(Hash::repeat_byte(0x01));
+	let req_prio = make_participation_request(Hash::repeat_byte(0x02));
+	let req_best_effort_then_prio = make_participation_request(Hash::repeat_byte(0x03));
+	let req_prio_then_best_effort = make_participation_request(Hash::repeat_byte(0x04));
+
+	queue.queue(None, req1.clone()).unwrap();
+	queue
+		.queue(Some(make_dummy_comparator(&req_prio, 1)), req_prio.clone())
+		.unwrap();
+	// Insert same best effort again:
+	queue.queue(None, req1.clone()).unwrap();
+	// insert same prio again:
+	queue
+		.queue(Some(make_dummy_comparator(&req_prio, 1)), req_prio.clone())
+		.unwrap();
+
+	// Insert first as best effort:
+	queue.queue(None, req_best_effort_then_prio.clone()).unwrap();
+	// Then as prio:
+	queue
+		.queue(
+			Some(make_dummy_comparator(&req_best_effort_then_prio, 2)),
+			req_best_effort_then_prio.clone(),
+		)
+		.unwrap();
+
+	// Make space in prio:
+	assert_eq!(queue.dequeue(), Some(req_prio));
+
+	// Insert first as prio:
+	queue
+		.queue(
+			Some(make_dummy_comparator(&req_prio_then_best_effort, 3)),
+			req_prio_then_best_effort.clone(),
+		)
+		.unwrap();
+	// Then as best effort:
+	queue.queue(None, req_prio_then_best_effort.clone()).unwrap();
+
+	assert_eq!(queue.dequeue(), Some(req_best_effort_then_prio));
+	assert_eq!(queue.dequeue(), Some(req_prio_then_best_effort));
+	assert_eq!(queue.dequeue(), Some(req1));
+	assert_eq!(queue.dequeue(), None);
+}
diff --git a/polkadot/node/core/dispute-coordinator/src/real/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/real/participation/tests.rs
new file mode 100644
index 00000000000..6d776f456fe
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/participation/tests.rs
@@ -0,0 +1,550 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use assert_matches::assert_matches;
+use futures::StreamExt;
+use polkadot_node_subsystem_util::TimeoutExt;
+use std::{sync::Arc, time::Duration};
+
+use sp_core::testing::TaskExecutor;
+
+use super::*;
+use parity_scale_codec::Encode;
+use polkadot_node_primitives::{AvailableData, BlockData, InvalidCandidate, PoV};
+use polkadot_node_subsystem::{
+	jaeger,
+	messages::{
+		AllMessages, DisputeCoordinatorMessage, RuntimeApiMessage, RuntimeApiRequest,
+		ValidationFailed,
+	},
+	ActivatedLeaf, ActiveLeavesUpdate, LeafStatus,
+};
+use polkadot_node_subsystem_test_helpers::{
+	make_subsystem_context, TestSubsystemContext, TestSubsystemContextHandle,
+};
+use polkadot_primitives::v1::{BlakeTwo256, CandidateCommitments, HashT, Header, ValidationCode};
+
+type VirtualOverseer = TestSubsystemContextHandle<DisputeCoordinatorMessage>;
+
+pub fn make_our_subsystem_context<S>(
+	spawn: S,
+) -> (
+	TestSubsystemContext<DisputeCoordinatorMessage, S>,
+	TestSubsystemContextHandle<DisputeCoordinatorMessage>,
+) {
+	make_subsystem_context(spawn)
+}
+
+async fn participate(
+	ctx: &mut impl SubsystemContext,
+	participation: &mut Participation,
+) -> Result<()> {
+	let commitments = CandidateCommitments::default();
+	participate_with_commitments_hash(ctx, participation, commitments.hash()).await
+}
+
+async fn participate_with_commitments_hash(
+	ctx: &mut impl SubsystemContext,
+	participation: &mut Participation,
+	commitments_hash: Hash,
+) -> Result<()> {
+	let candidate_receipt = {
+		let mut receipt = CandidateReceipt::default();
+		receipt.commitments_hash = commitments_hash;
+		receipt
+	};
+	let session = 1;
+	let n_validators = 10;
+
+	let req = ParticipationRequest::new(candidate_receipt, session, n_validators);
+
+	participation.queue_participation(ctx, None, req).await
+}
+
+async fn activate_leaf(
+	ctx: &mut impl SubsystemContext,
+	participation: &mut Participation,
+	block_number: BlockNumber,
+) -> FatalResult<()> {
+	let block_header = Header {
+		parent_hash: BlakeTwo256::hash(&block_number.encode()),
+		number: block_number,
+		digest: Default::default(),
+		state_root: Default::default(),
+		extrinsics_root: Default::default(),
+	};
+
+	let block_hash = block_header.hash();
+
+	participation
+		.process_active_leaves_update(
+			ctx,
+			&ActiveLeavesUpdate::start_work(ActivatedLeaf {
+				hash: block_hash,
+				span: Arc::new(jaeger::Span::Disabled),
+				number: block_number,
+				status: LeafStatus::Fresh,
+			}),
+		)
+		.await
+}
+
+/// Full participation happy path as seen via the overseer.
+pub async fn participation_full_happy_path(ctx_handle: &mut VirtualOverseer) {
+	recover_available_data(ctx_handle).await;
+	fetch_validation_code(ctx_handle).await;
+	store_available_data(ctx_handle, true).await;
+
+	assert_matches!(
+		ctx_handle.recv().await,
+		AllMessages::CandidateValidation(
+			CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
+		) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
+			tx.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))).unwrap();
+		},
+		"overseer did not receive candidate validation message",
+	);
+}
+
+/// Full participation with failing availability recovery.
+pub async fn participation_missing_availability(ctx_handle: &mut VirtualOverseer) {
+	assert_matches!(
+		ctx_handle.recv().await,
+		AllMessages::AvailabilityRecovery(
+			AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
+		) => {
+			tx.send(Err(RecoveryError::Unavailable)).unwrap();
+		},
+		"overseer did not receive recover available data message",
+	);
+}
+
+async fn recover_available_data(virtual_overseer: &mut VirtualOverseer) {
+	let pov_block = PoV { block_data: BlockData(Vec::new()) };
+
+	let available_data =
+		AvailableData { pov: Arc::new(pov_block), validation_data: Default::default() };
+
+	assert_matches!(
+		virtual_overseer.recv().await,
+		AllMessages::AvailabilityRecovery(
+			AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
+		) => {
+			tx.send(Ok(available_data)).unwrap();
+		},
+		"overseer did not receive recover available data message",
+	);
+}
+
+/// Handles validation code fetch, returns the received relay parent hash.
+async fn fetch_validation_code(virtual_overseer: &mut VirtualOverseer) -> Hash {
+	let validation_code = ValidationCode(Vec::new());
+
+	assert_matches!(
+		virtual_overseer.recv().await,
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+			hash,
+			RuntimeApiRequest::ValidationCodeByHash(
+				_,
+				tx,
+			)
+		)) => {
+			tx.send(Ok(Some(validation_code))).unwrap();
+			hash
+		},
+		"overseer did not receive runtime API request for validation code",
+	)
+}
+
+async fn store_available_data(virtual_overseer: &mut VirtualOverseer, success: bool) {
+	assert_matches!(
+		virtual_overseer.recv().await,
+		AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { tx, .. }) => {
+			if success {
+				tx.send(Ok(())).unwrap();
+			} else {
+				tx.send(Err(())).unwrap();
+			}
+		},
+		"overseer did not receive store available data request",
+	);
+}
+
+#[test]
+fn same_req_wont_get_queued_if_participation_is_already_running() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+		for _ in 0..MAX_PARALLEL_PARTICIPATIONS {
+			participate(&mut ctx, &mut participation).await.unwrap();
+		}
+
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::AvailabilityRecovery(
+				AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
+			) => {
+				tx.send(Err(RecoveryError::Unavailable)).unwrap();
+			},
+			"overseer did not receive recover available data message",
+		);
+
+		let result = participation
+			.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+			.await
+			.unwrap();
+
+		assert_matches!(
+			result.outcome,
+			ParticipationOutcome::Unavailable => {}
+		);
+
+		// we should not have any further results nor recovery requests:
+		assert_matches!(ctx_handle.recv().timeout(Duration::from_millis(10)).await, None);
+		assert_matches!(worker_receiver.next().timeout(Duration::from_millis(10)).await, None);
+	})
+}
+
+#[test]
+fn reqs_get_queued_when_out_of_capacity() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+		for i in 0..MAX_PARALLEL_PARTICIPATIONS {
+			participate_with_commitments_hash(
+				&mut ctx,
+				&mut participation,
+				Hash::repeat_byte(i as _),
+			)
+			.await
+			.unwrap();
+		}
+
+		for _ in 0..MAX_PARALLEL_PARTICIPATIONS + 1 {
+			assert_matches!(
+				ctx_handle.recv().await,
+				AllMessages::AvailabilityRecovery(
+					AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
+				) => {
+					tx.send(Err(RecoveryError::Unavailable)).unwrap();
+				},
+				"overseer did not receive recover available data message",
+			);
+
+			let result = participation
+				.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+				.await
+				.unwrap();
+
+			assert_matches!(
+				result.outcome,
+				ParticipationOutcome::Unavailable => {}
+			);
+		}
+
+		// we should not have any further results nor recovery requests:
+		assert_matches!(ctx_handle.recv().timeout(Duration::from_millis(10)).await, None);
+		assert_matches!(worker_receiver.next().timeout(Duration::from_millis(10)).await, None);
+	})
+}
+
+#[test]
+fn reqs_get_queued_on_no_recent_block() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, _worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		participate(&mut ctx, &mut participation).await.unwrap();
+		assert!(ctx_handle.recv().timeout(Duration::from_millis(10)).await.is_none());
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+
+		// after activating at least one leaf the recent block
+		// state should be available which should lead to trying
+		// to participate by first trying to recover the available
+		// data
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::AvailabilityRecovery(AvailabilityRecoveryMessage::RecoverAvailableData(
+				..
+			)),
+			"overseer did not receive recover available data message",
+		);
+	})
+}
+
+#[test]
+fn cannot_participate_if_cannot_recover_available_data() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::AvailabilityRecovery(
+				AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
+			) => {
+				tx.send(Err(RecoveryError::Unavailable)).unwrap();
+			},
+			"overseer did not receive recover available data message",
+		);
+		let result = participation
+			.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+			.await
+			.unwrap();
+		assert_matches!(
+			result.outcome,
+			ParticipationOutcome::Unavailable => {}
+		);
+	})
+}
+
+#[test]
+fn cannot_participate_if_cannot_recover_validation_code() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+
+		recover_available_data(&mut ctx_handle).await;
+
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_,
+				RuntimeApiRequest::ValidationCodeByHash(
+					_,
+					tx,
+				)
+			)) => {
+				tx.send(Ok(None)).unwrap();
+			},
+			"overseer did not receive runtime API request for validation code",
+		);
+
+		let result = participation
+			.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+			.await
+			.unwrap();
+		assert_matches!(
+			result.outcome,
+			ParticipationOutcome::Error => {}
+		);
+	})
+}
+
+#[test]
+fn cast_invalid_vote_if_available_data_is_invalid() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::AvailabilityRecovery(
+				AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
+			) => {
+				tx.send(Err(RecoveryError::Invalid)).unwrap();
+			},
+			"overseer did not receive recover available data message",
+		);
+
+		let result = participation
+			.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+			.await
+			.unwrap();
+		assert_matches!(
+			result.outcome,
+			ParticipationOutcome::Invalid => {}
+		);
+	})
+}
+
+#[test]
+fn cast_invalid_vote_if_validation_fails_or_is_invalid() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+
+		recover_available_data(&mut ctx_handle).await;
+		assert_eq!(
+			fetch_validation_code(&mut ctx_handle).await,
+			participation.recent_block.unwrap().1
+		);
+		store_available_data(&mut ctx_handle, true).await;
+
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::CandidateValidation(
+				CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
+			) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
+				tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap();
+			},
+			"overseer did not receive candidate validation message",
+		);
+
+		let result = participation
+			.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+			.await
+			.unwrap();
+		assert_matches!(
+			result.outcome,
+			ParticipationOutcome::Invalid => {}
+		);
+	})
+}
+
+#[test]
+fn cast_invalid_vote_if_validation_passes_but_commitments_dont_match() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+
+		recover_available_data(&mut ctx_handle).await;
+		assert_eq!(
+			fetch_validation_code(&mut ctx_handle).await,
+			participation.recent_block.unwrap().1
+		);
+		store_available_data(&mut ctx_handle, true).await;
+
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::CandidateValidation(
+				CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
+			) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
+				let mut commitments = CandidateCommitments::default();
+				// this should lead to a commitments hash mismatch
+				commitments.processed_downward_messages = 42;
+
+				tx.send(Ok(ValidationResult::Valid(commitments, Default::default()))).unwrap();
+			},
+			"overseer did not receive candidate validation message",
+		);
+
+		let result = participation
+			.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+			.await
+			.unwrap();
+		assert_matches!(
+			result.outcome,
+			ParticipationOutcome::Invalid => {}
+		);
+	})
+}
+
+#[test]
+fn cast_valid_vote_if_validation_passes() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+
+		recover_available_data(&mut ctx_handle).await;
+		assert_eq!(
+			fetch_validation_code(&mut ctx_handle).await,
+			participation.recent_block.unwrap().1
+		);
+		store_available_data(&mut ctx_handle, true).await;
+
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::CandidateValidation(
+				CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
+			) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
+				tx.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))).unwrap();
+			},
+			"overseer did not receive candidate validation message",
+		);
+
+		let result = participation
+			.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+			.await
+			.unwrap();
+		assert_matches!(
+			result.outcome,
+			ParticipationOutcome::Valid => {}
+		);
+	})
+}
+
+#[test]
+fn failure_to_store_available_data_does_not_preclude_participation() {
+	futures::executor::block_on(async {
+		let (mut ctx, mut ctx_handle) = make_our_subsystem_context(TaskExecutor::new());
+
+		let (sender, mut worker_receiver) = mpsc::channel(1);
+		let mut participation = Participation::new(sender);
+		activate_leaf(&mut ctx, &mut participation, 10).await.unwrap();
+		participate(&mut ctx, &mut participation).await.unwrap();
+
+		recover_available_data(&mut ctx_handle).await;
+		assert_eq!(
+			fetch_validation_code(&mut ctx_handle).await,
+			participation.recent_block.unwrap().1
+		);
+		// the store available data request should fail:
+		store_available_data(&mut ctx_handle, false).await;
+
+		assert_matches!(
+			ctx_handle.recv().await,
+			AllMessages::CandidateValidation(
+				CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
+			) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
+				tx.send(Err(ValidationFailed("fail".to_string()))).unwrap();
+			},
+			"overseer did not receive candidate validation message",
+		);
+
+		let result = participation
+			.get_participation_result(&mut ctx, worker_receiver.next().await.unwrap())
+			.await
+			.unwrap();
+		assert_matches!(
+			result.outcome,
+			ParticipationOutcome::Invalid => {}
+		);
+	})
+}
diff --git a/polkadot/node/core/dispute-coordinator/src/real/spam_slots.rs b/polkadot/node/core/dispute-coordinator/src/real/spam_slots.rs
new file mode 100644
index 00000000000..6c8707152a6
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/spam_slots.rs
@@ -0,0 +1,123 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::collections::{HashMap, HashSet};
+
+use polkadot_primitives::v1::{CandidateHash, SessionIndex, ValidatorIndex};
+
+use crate::real::LOG_TARGET;
+
+/// Type used for counting potential spam votes.
+type SpamCount = u32;
+
+/// How many unconfirmed disputes a validator is allowed to be a participant in (per session).
+///
+/// Unconfirmed means: the node has not seen the candidate included on any chain, it has not cast
+/// a vote itself on that dispute, the dispute has not yet reached more than a third of the
+/// validators' votes and the including relay chain block has not yet been finalized.
+///
+/// The exact value of `MAX_SPAM_VOTES` is not that important here. It is important that the
+/// number is low enough to not cause resource exhaustion if multiple validators exhaust their
+/// limits. Also, if things are working properly, this number cannot really be too low either, as
+/// all relevant disputes _should_ have been seen as included by enough validators. (Otherwise
+/// the candidate would not have been available in the first place and could not have been
+/// included.) So this is really just a fallback mechanism for when things go terribly wrong.
+const MAX_SPAM_VOTES: SpamCount = 50;
+
+/// Spam slots for raised disputes concerning unknown candidates.
+pub struct SpamSlots {
+	/// Counts per validator and session.
+	///
+	/// Must not exceed `MAX_SPAM_VOTES`.
+	slots: HashMap<(SessionIndex, ValidatorIndex), SpamCount>,
+
+	/// All unconfirmed candidates we are aware of right now.
+	unconfirmed: UnconfirmedDisputes,
+}
+
+/// Unconfirmed disputes to be passed in on initialization.
+pub type UnconfirmedDisputes = HashMap<(SessionIndex, CandidateHash), HashSet<ValidatorIndex>>;
+
+impl SpamSlots {
+	/// Recover `SpamSlots` from state on startup.
+	///
+	/// Initialize based on already existing active disputes.
+	pub fn recover_from_state(unconfirmed_disputes: UnconfirmedDisputes) -> Self {
+		let mut slots: HashMap<(SessionIndex, ValidatorIndex), SpamCount> = HashMap::new();
+		for ((session, _), validators) in unconfirmed_disputes.iter() {
+			for validator in validators {
+				let e = slots.entry((*session, *validator)).or_default();
+				*e += 1;
+				if *e > MAX_SPAM_VOTES {
+					tracing::debug!(
+						target: LOG_TARGET,
+						?session,
+						?validator,
+						count = ?e,
+						"Import exceeded spam slot for validator"
+					);
+				}
+			}
+		}
+
+		Self { slots, unconfirmed: unconfirmed_disputes }
+	}
+
+	/// Add an unconfirmed dispute if free slots are available.
+	///
+	/// Returns `false` if the validator has no spam slots left for this session or has already
+	/// been registered for this candidate, `true` otherwise.
+	pub fn add_unconfirmed(
+		&mut self,
+		session: SessionIndex,
+		candidate: CandidateHash,
+		validator: ValidatorIndex,
+	) -> bool {
+		let c = self.slots.entry((session, validator)).or_default();
+		if *c >= MAX_SPAM_VOTES {
+			return false
+		}
+		let validators = self.unconfirmed.entry((session, candidate)).or_default();
+
+		if validators.insert(validator) {
+			*c += 1;
+			true
+		} else {
+			false
+		}
+	}
+
+	/// Clear out spam slots for a given candidate in a session.
+	///
+	/// This effectively reduces the spam slot count for all validators participating in a dispute
+	/// for that candidate. You should call this function once a dispute becomes obsolete or gets
+	/// confirmed, so that votes for it are no longer treated as potential spam.
+	pub fn clear(&mut self, key: &(SessionIndex, CandidateHash)) {
+		if let Some(validators) = self.unconfirmed.remove(key) {
+			let (session, _) = key;
+			for validator in validators {
+				if let Some(c) = self.slots.remove(&(*session, validator)) {
+					let new = c - 1;
+					if new > 0 {
+						self.slots.insert((*session, validator), new);
+					}
+				}
+			}
+		}
+	}
+
+	/// Prune all spam slots for sessions older than the given index.
+	pub fn prune_old(&mut self, oldest_index: SessionIndex) {
+		self.unconfirmed.retain(|(session, _), _| *session >= oldest_index);
+		self.slots.retain(|(session, _), _| *session >= oldest_index);
+	}
+}
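+
+#[cfg(test)]
+mod tests {
+	// A minimal sketch of the intended slot accounting, exercising only the public API above.
+	// (Illustrative test, not exhaustive.)
+	use super::*;
+
+	#[test]
+	fn slots_are_freed_on_clear() {
+		let mut slots = SpamSlots::recover_from_state(UnconfirmedDisputes::new());
+		let session: SessionIndex = 1;
+		let candidate = CandidateHash::default();
+		let validator = ValidatorIndex(0);
+
+		// The first vote for an unknown candidate occupies a slot ...
+		assert!(slots.add_unconfirmed(session, candidate, validator));
+		// ... but the same (candidate, validator) pair is only counted once.
+		assert!(!slots.add_unconfirmed(session, candidate, validator));
+
+		// Once the dispute is confirmed or obsolete, the slot is freed again.
+		slots.clear(&(session, candidate));
+		assert!(slots.add_unconfirmed(session, candidate, validator));
+	}
+}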
diff --git a/polkadot/node/core/dispute-coordinator/src/real/status.rs b/polkadot/node/core/dispute-coordinator/src/real/status.rs
new file mode 100644
index 00000000000..c0b01e9b96b
--- /dev/null
+++ b/polkadot/node/core/dispute-coordinator/src/real/status.rs
@@ -0,0 +1,165 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::time::{SystemTime, UNIX_EPOCH};
+
+use parity_scale_codec::{Decode, Encode};
+use polkadot_primitives::v1::{CandidateHash, SessionIndex};
+
+use crate::real::LOG_TARGET;
+
+/// The choice here is fairly arbitrary. But any dispute that concluded more than a few minutes ago
+/// is not worth considering anymore. Changing this value has little to no bearing on consensus,
+/// and really only affects the work that the node might do on startup during periods of many
+/// disputes.
+pub const ACTIVE_DURATION_SECS: Timestamp = 180;
+
+/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots.
+pub type Timestamp = u64;
+
+/// The status of a dispute. This is a state machine which can be altered by the
+/// helper methods.
+#[derive(Debug, Clone, Copy, Encode, Decode, PartialEq)]
+pub enum DisputeStatus {
+	/// The dispute is active and unconcluded.
+	#[codec(index = 0)]
+	Active,
+	/// The dispute has been concluded in favor of the candidate
+	/// since the given timestamp.
+	#[codec(index = 1)]
+	ConcludedFor(Timestamp),
+	/// The dispute has been concluded against the candidate
+	/// since the given timestamp.
+	///
+	/// This takes precedence over `ConcludedFor` in the case that
+	/// both are true, which is impossible unless a large number of
+	/// validators is participating on both sides.
+	#[codec(index = 2)]
+	ConcludedAgainst(Timestamp),
+	/// Dispute has been confirmed (more than `byzantine_threshold` validators already
+	/// participated, we have seen the candidate included or we have participated
+	/// successfully ourselves).
+	#[codec(index = 3)]
+	Confirmed,
+}
+
+impl DisputeStatus {
+	/// Initialize the status to the active state.
+	pub fn active() -> DisputeStatus {
+		DisputeStatus::Active
+	}
+
+	/// Move the status to confirmed, unless the dispute has already concluded.
+	pub fn confirm(self) -> DisputeStatus {
+		match self {
+			DisputeStatus::Active => DisputeStatus::Confirmed,
+			DisputeStatus::Confirmed => DisputeStatus::Confirmed,
+			DisputeStatus::ConcludedFor(_) | DisputeStatus::ConcludedAgainst(_) => self,
+		}
+	}
+
+	/// Check whether the dispute is confirmed or has concluded, i.e. it can no longer be spam.
+	pub fn is_confirmed_concluded(&self) -> bool {
+		match self {
+			DisputeStatus::Confirmed |
+			DisputeStatus::ConcludedFor(_) |
+			DisputeStatus::ConcludedAgainst(_) => true,
+			DisputeStatus::Active => false,
+		}
+	}
+
+	/// Transition the status to a new status after observing the dispute has concluded for the candidate.
+	/// This may be a no-op if the status was already concluded.
+	pub fn concluded_for(self, now: Timestamp) -> DisputeStatus {
+		match self {
+			DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedFor(now),
+			DisputeStatus::ConcludedFor(at) => DisputeStatus::ConcludedFor(std::cmp::min(at, now)),
+			against => against,
+		}
+	}
+
+	/// Transition the status to a new status after observing the dispute has concluded against the candidate.
+	/// This may be a no-op if the status was already concluded.
+	pub fn concluded_against(self, now: Timestamp) -> DisputeStatus {
+		match self {
+			DisputeStatus::Active | DisputeStatus::Confirmed =>
+				DisputeStatus::ConcludedAgainst(now),
+			DisputeStatus::ConcludedFor(at) =>
+				DisputeStatus::ConcludedAgainst(std::cmp::min(at, now)),
+			DisputeStatus::ConcludedAgainst(at) =>
+				DisputeStatus::ConcludedAgainst(std::cmp::min(at, now)),
+		}
+	}
+
+	/// Whether the disputed candidate is possibly invalid.
+	pub fn is_possibly_invalid(&self) -> bool {
+		match self {
+			DisputeStatus::Active |
+			DisputeStatus::Confirmed |
+			DisputeStatus::ConcludedAgainst(_) => true,
+			DisputeStatus::ConcludedFor(_) => false,
+		}
+	}
+
+	/// Yields the timestamp this dispute concluded at, if any.
+	pub fn concluded_at(&self) -> Option<Timestamp> {
+		match self {
+			DisputeStatus::Active | DisputeStatus::Confirmed => None,
+			DisputeStatus::ConcludedFor(at) | DisputeStatus::ConcludedAgainst(at) => Some(*at),
+		}
+	}
+}
+
+/// Get active disputes as an iterator, preserving their `DisputeStatus`.
+///
+/// Disputes that concluded longer than `ACTIVE_DURATION_SECS` ago are filtered out.
+pub fn get_active_with_status(
+	recent_disputes: impl Iterator<Item = ((SessionIndex, CandidateHash), DisputeStatus)>,
+	now: Timestamp,
+) -> impl Iterator<Item = ((SessionIndex, CandidateHash), DisputeStatus)> {
+	recent_disputes.filter_map(move |(disputed, status)| {
+		status
+			.concluded_at()
+			.filter(|at| *at + ACTIVE_DURATION_SECS < now)
+			.map_or(Some((disputed, status)), |_| None)
+	})
+}
+
+/// Abstraction of the current time, mostly for easier testing.
+pub trait Clock: Send + Sync {
+	fn now(&self) -> Timestamp;
+}
+
+/// `Clock` implementation based on the actual system time.
+pub struct SystemClock;
+
+impl Clock for SystemClock {
+	fn now(&self) -> Timestamp {
+		// `SystemTime` is notoriously non-monotonic, so our timers might not work
+		// exactly as expected.
+		//
+		// Regardless, disputes are considered active based on an order of minutes,
+		// so a few seconds of slippage in either direction shouldn't affect the
+		// amount of work the node is doing significantly.
+		match SystemTime::now().duration_since(UNIX_EPOCH) {
+			Ok(d) => d.as_secs(),
+			Err(e) => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					err = ?e,
+					"Current time is before unix epoch. Validation will not work correctly."
+				);
+
+				0
+			},
+		}
+	}
+}
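+
+#[cfg(test)]
+mod tests {
+	// A minimal sketch of the state machine semantics documented above, exercising only the
+	// public transitions with fixed timestamps. (Illustrative test, not exhaustive.)
+	use super::*;
+
+	#[test]
+	fn status_transitions() {
+		let now: Timestamp = 42;
+
+		// `Active` can be confirmed; concluded statuses stay concluded.
+		assert_eq!(DisputeStatus::active().confirm(), DisputeStatus::Confirmed);
+		assert_eq!(DisputeStatus::ConcludedFor(now).confirm(), DisputeStatus::ConcludedFor(now));
+
+		// Concluding records the earliest conclusion time.
+		let concluded = DisputeStatus::active().concluded_for(now);
+		assert_eq!(concluded.concluded_at(), Some(now));
+		assert_eq!(concluded.concluded_for(now + 10), DisputeStatus::ConcludedFor(now));
+	}
+
+	#[test]
+	fn old_disputes_are_filtered_out() {
+		let now: Timestamp = 1_000;
+		let old = DisputeStatus::ConcludedFor(now - ACTIVE_DURATION_SECS - 1);
+		let recent = DisputeStatus::ConcludedFor(now);
+		let disputes =
+			vec![((1, CandidateHash::default()), old), ((2, CandidateHash::default()), recent)];
+
+		// Only the dispute that concluded within `ACTIVE_DURATION_SECS` remains active.
+		let active: Vec<_> = get_active_with_status(disputes.into_iter(), now).collect();
+		assert_eq!(active.len(), 1);
+	}
+}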
diff --git a/polkadot/node/core/dispute-coordinator/src/real/tests.rs b/polkadot/node/core/dispute-coordinator/src/real/tests.rs
index 147fcede8a5..bf1b461b973 100644
--- a/polkadot/node/core/dispute-coordinator/src/real/tests.rs
+++ b/polkadot/node/core/dispute-coordinator/src/real/tests.rs
@@ -14,30 +14,60 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::HashMap;
+use std::{
+	collections::HashMap,
+	sync::{
+		atomic::{AtomicU64, Ordering as AtomicOrdering},
+		Arc,
+	},
+	time::Duration,
+};
 
-use super::*;
 use assert_matches::assert_matches;
 use futures::{
 	channel::oneshot,
 	future::{self, BoxFuture},
 };
-use overseer::TimeoutExt;
+
+use kvdb::KeyValueDB;
 use parity_scale_codec::Encode;
+
+use polkadot_node_primitives::SignedDisputeStatement;
+use polkadot_node_subsystem::{
+	messages::{DisputeCoordinatorMessage, DisputeDistributionMessage, ImportStatementsResult},
+	overseer::FromOverseer,
+	OverseerSignal,
+};
+use polkadot_node_subsystem_util::TimeoutExt;
+use sc_keystore::LocalKeystore;
+use sp_core::testing::TaskExecutor;
+use sp_keyring::Sr25519Keyring;
+use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
+
 use polkadot_node_subsystem::{
 	jaeger,
 	messages::{AllMessages, BlockDescription, RuntimeApiMessage, RuntimeApiRequest},
 	ActivatedLeaf, ActiveLeavesUpdate, LeafStatus,
 };
 use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle};
-use polkadot_primitives::v1::{BlakeTwo256, HashT, Header, SessionInfo, ValidatorId};
-use sp_core::testing::TaskExecutor;
-use sp_keyring::Sr25519Keyring;
-use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
+use polkadot_primitives::v1::{
+	BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, CandidateReceipt, Hash, HashT,
+	Header, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex,
+};
 
-use std::{
-	sync::atomic::{AtomicU64, Ordering as AtomicOrdering},
-	time::Duration,
+use crate::{
+	metrics::Metrics,
+	real::{
+		backend::Backend,
+		participation::{participation_full_happy_path, participation_missing_availability},
+		status::ACTIVE_DURATION_SECS,
+	},
+	Config, DisputeCoordinatorSubsystem,
+};
+
+use super::{
+	db::v1::DbBackend,
+	status::{Clock, Timestamp},
 };
 
 const TEST_TIMEOUT: Duration = Duration::from_secs(2);
@@ -204,13 +234,23 @@ impl TestState {
 			)
 		}
 
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+				_new_leaf,
+				RuntimeApiRequest::CandidateEvents(tx),
+			)) => {
+				tx.send(Ok(Vec::new())).unwrap();
+			}
+		);
+
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 				_new_leaf,
 				RuntimeApiRequest::FetchOnChainVotes(tx),
 			)) => {
 				// add some `BackedCandidates` or resolved disputes here as needed
 				tx.send(Ok(Some(ScrapedOnChainVotes::default()))).unwrap();
 			}
 		)
@@ -222,12 +262,12 @@ impl TestState {
 		session: SessionIndex,
 	) {
 		let leaves: Vec<Hash> = self.headers.keys().cloned().collect();
-		for leaf in leaves.iter() {
+		for (n, leaf) in leaves.iter().enumerate() {
 			virtual_overseer
 				.send(FromOverseer::Signal(OverseerSignal::ActiveLeaves(
 					ActiveLeavesUpdate::start_work(ActivatedLeaf {
 						hash: *leaf,
-						number: 1,
+						number: n as u32,
 						span: Arc::new(jaeger::Span::Disabled),
 						status: LeafStatus::Fresh,
 					}),
@@ -286,7 +326,7 @@ impl TestState {
 			Metrics::default(),
 		);
 		let backend = DbBackend::new(self.db.clone(), self.config.column_config());
-		let subsystem_task = run(subsystem, ctx, backend, Box::new(self.clock.clone()));
+		let subsystem_task = subsystem.run(ctx, backend, Box::new(self.clock.clone()));
 		let test_task = test(self, ctx_handle);
 
 		let (_, state) = futures::executor::block_on(future::join(subsystem_task, test_task));
@@ -301,6 +341,33 @@ where
 	TestState::default().resume(test)
 }
 
+/// Drive a dispute participation through the full happy path and expect the
+/// resulting dispute to be distributed.
+async fn participation_with_distribution(
+	virtual_overseer: &mut VirtualOverseer,
+	candidate_hash: &CandidateHash,
+) {
+	participation_full_happy_path(virtual_overseer).await;
+	assert_matches!(
+		virtual_overseer.recv().await,
+		AllMessages::DisputeDistribution(
+			DisputeDistributionMessage::SendDispute(msg)
+		) => {
+			assert_eq!(&msg.candidate_receipt().hash(), candidate_hash);
+		}
+	);
+}
+
+fn make_valid_candidate_receipt() -> CandidateReceipt {
+	let mut candidate_receipt = CandidateReceipt::default();
+	candidate_receipt.commitments_hash = CandidateCommitments::default().hash();
+	candidate_receipt
+}
+
+fn make_invalid_candidate_receipt() -> CandidateReceipt {
+	// Commitments hash will be 0, which is not correct:
+	CandidateReceipt::default()
+}
+
 #[test]
 fn conflicting_votes_lead_to_dispute_participation() {
 	test_harness(|mut test_state, mut virtual_overseer| {
@@ -309,7 +376,7 @@ fn conflicting_votes_lead_to_dispute_participation() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -338,22 +405,8 @@ fn conflicting_votes_lead_to_dispute_participation() {
 					},
 				})
 				.await;
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeParticipation(DisputeParticipationMessage::Participate {
-					candidate_hash: c_hash,
-					candidate_receipt: c_receipt,
-					session: s,
-					n_validators,
-					report_availability,
-				}) => {
-					assert_eq!(c_hash, candidate_hash);
-					assert_eq!(c_receipt, candidate_receipt);
-					assert_eq!(s, session);
-					assert_eq!(n_validators, test_state.validators.len() as u32);
-					report_availability.send(true).unwrap();
-				}
-			);
+
+			participation_with_distribution(&mut virtual_overseer, &candidate_hash).await;
 
 			{
 				let (tx, rx) = oneshot::channel();
@@ -376,7 +429,7 @@ fn conflicting_votes_lead_to_dispute_participation() {
 					.await;
 
 				let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone();
-				assert_eq!(votes.valid.len(), 1);
+				assert_eq!(votes.valid.len(), 2);
 				assert_eq!(votes.invalid.len(), 1);
 			}
 
@@ -405,7 +458,7 @@ fn conflicting_votes_lead_to_dispute_participation() {
 					.await;
 
 				let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone();
-				assert_eq!(votes.valid.len(), 1);
+				assert_eq!(votes.valid.len(), 2);
 				assert_eq!(votes.invalid.len(), 2);
 			}
 
@@ -427,7 +480,7 @@ fn positive_votes_dont_trigger_participation() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -532,7 +585,7 @@ fn wrong_validator_index_is_ignored() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -602,7 +655,7 @@ fn finality_votes_ignore_disputed_candidates() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -629,17 +682,7 @@ fn finality_votes_ignore_disputed_candidates() {
 				})
 				.await;
 
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeParticipation(
-					DisputeParticipationMessage::Participate {
-						report_availability,
-						..
-					}
-				) => {
-					report_availability.send(true).unwrap();
-				}
-			);
+			participation_with_distribution(&mut virtual_overseer, &candidate_hash).await;
 
 			{
 				let (tx, rx) = oneshot::channel();
@@ -705,7 +748,7 @@ fn supermajority_valid_dispute_may_be_finalized() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -735,23 +778,7 @@ fn supermajority_valid_dispute_may_be_finalized() {
 				})
 				.await;
 
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeParticipation(
-					DisputeParticipationMessage::Participate {
-						candidate_hash: c_hash,
-						candidate_receipt: c_receipt,
-						session: s,
-						report_availability,
-						..
-					}
-				) => {
-					assert_eq!(candidate_hash, c_hash);
-					assert_eq!(candidate_receipt, c_receipt);
-					assert_eq!(session, s);
-					report_availability.send(true).unwrap();
-				}
-			);
+			participation_with_distribution(&mut virtual_overseer, &candidate_hash).await;
 
 			let mut statements = Vec::new();
 			for i in (0..supermajority_threshold - 1).map(|i| i + 3) {
@@ -838,7 +865,7 @@ fn concluded_supermajority_for_non_active_after_time() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -868,20 +895,11 @@ fn concluded_supermajority_for_non_active_after_time() {
 				})
 				.await;
 
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeParticipation(
-					DisputeParticipationMessage::Participate {
-						report_availability,
-						..
-					}
-				) => {
-					report_availability.send(true).unwrap();
-				}
-			);
+			participation_with_distribution(&mut virtual_overseer, &candidate_hash).await;
 
 			let mut statements = Vec::new();
-			for i in (0..supermajority_threshold - 1).map(|i| i + 3) {
+			// -2: One for the already imported vote and one for the local vote (which is valid).
+			for i in (0..supermajority_threshold - 2).map(|i| i + 3) {
 				let vote =
 					test_state.issue_statement_with_index(i, candidate_hash, session, true).await;
 
@@ -941,7 +959,8 @@ fn concluded_supermajority_against_non_active_after_time() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_invalid_candidate_receipt();
+
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -955,7 +974,7 @@ fn concluded_supermajority_against_non_active_after_time() {
 			let invalid_vote =
 				test_state.issue_statement_with_index(1, candidate_hash, session, false).await;
 
-			let (pending_confirmation, _confirmation_rx) = oneshot::channel();
+			let (pending_confirmation, confirmation_rx) = oneshot::channel();
 			virtual_overseer
 				.send(FromOverseer::Communication {
 					msg: DisputeCoordinatorMessage::ImportStatements {
@@ -970,21 +989,15 @@ fn concluded_supermajority_against_non_active_after_time() {
 					},
 				})
 				.await;
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeParticipation(
-					DisputeParticipationMessage::Participate {
-						report_availability,
-						..
-					}
-				) => {
-					report_availability.send(true).unwrap();
-				}
+			assert_matches!(
+				confirmation_rx.await.unwrap(),
+				ImportStatementsResult::ValidImport => {}
 			);
 
+			participation_with_distribution(&mut virtual_overseer, &candidate_hash).await;
+
 			let mut statements = Vec::new();
-			for i in (0..supermajority_threshold - 1).map(|i| i + 3) {
+			// Minus 2 because of the local vote and one previously imported invalid vote.
+			for i in (0..supermajority_threshold - 2).map(|i| i + 3) {
 				let vote =
 					test_state.issue_statement_with_index(i, candidate_hash, session, false).await;
 
@@ -1029,85 +1042,11 @@ fn concluded_supermajority_against_non_active_after_time() {
 			}
 
 			virtual_overseer.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-			assert!(virtual_overseer.try_recv().await.is_none());
-
-			test_state
-		})
-	});
-}
-
-#[test]
-fn fresh_dispute_ignored_if_unavailable() {
-	test_harness(|mut test_state, mut virtual_overseer| {
-		Box::pin(async move {
-			let session = 1;
-
-			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
-
-			let candidate_receipt = CandidateReceipt::default();
-			let candidate_hash = candidate_receipt.hash();
-
-			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
-
-			let valid_vote =
-				test_state.issue_statement_with_index(2, candidate_hash, session, true).await;
-
-			let invalid_vote =
-				test_state.issue_statement_with_index(1, candidate_hash, session, false).await;
-
-			let (pending_confirmation, _confirmation_rx) = oneshot::channel();
-			virtual_overseer
-				.send(FromOverseer::Communication {
-					msg: DisputeCoordinatorMessage::ImportStatements {
-						candidate_hash,
-						candidate_receipt: candidate_receipt.clone(),
-						session,
-						statements: vec![
-							(valid_vote, ValidatorIndex(2)),
-							(invalid_vote, ValidatorIndex(1)),
-						],
-						pending_confirmation,
-					},
-				})
-				.await;
-
 			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeParticipation(
-					DisputeParticipationMessage::Participate {
-						report_availability,
-						..
-					}
-				) => {
-					report_availability.send(false).unwrap();
-				}
+				virtual_overseer.try_recv().await,
+				None => {}
 			);
 
-			{
-				let (tx, rx) = oneshot::channel();
-
-				virtual_overseer
-					.send(FromOverseer::Communication {
-						msg: DisputeCoordinatorMessage::ActiveDisputes(tx),
-					})
-					.await;
-
-				assert!(rx.await.unwrap().is_empty());
-
-				let (tx, rx) = oneshot::channel();
-
-				virtual_overseer
-					.send(FromOverseer::Communication {
-						msg: DisputeCoordinatorMessage::RecentDisputes(tx),
-					})
-					.await;
-
-				assert!(rx.await.unwrap().is_empty());
-			}
-
-			virtual_overseer.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-			assert!(virtual_overseer.try_recv().await.is_none());
-
 			test_state
 		})
 	});
@@ -1121,7 +1060,7 @@ fn resume_dispute_without_local_statement() {
 		Box::pin(async move {
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -1148,17 +1087,8 @@ fn resume_dispute_without_local_statement() {
 				})
 				.await;
 
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeParticipation(
-					DisputeParticipationMessage::Participate {
-						report_availability,
-						..
-					}
-				) => {
-					report_availability.send(true).unwrap();
-				}
-			);
+			// Missing availability -> No local vote.
+			participation_missing_availability(&mut virtual_overseer).await;
 
 			assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport));
 
@@ -1186,26 +1116,10 @@ fn resume_dispute_without_local_statement() {
 		Box::pin(async move {
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeParticipation(
-					DisputeParticipationMessage::Participate {
-						candidate_hash: c_hash,
-						candidate_receipt: c_receipt,
-						session: s,
-						report_availability,
-						..
-					}
-				) => {
-					assert_eq!(candidate_hash, c_hash);
-					assert_eq!(candidate_receipt, c_receipt);
-					assert_eq!(session, s);
-					report_availability.send(true).unwrap();
-				}
-			);
+			participation_with_distribution(&mut virtual_overseer, &candidate_hash).await;
 
 			let valid_vote0 =
 				test_state.issue_statement_with_index(0, candidate_hash, session, true).await;
@@ -1266,7 +1180,7 @@ fn resume_dispute_with_local_statement() {
 		Box::pin(async move {
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -1344,7 +1258,7 @@ fn resume_dispute_without_local_statement_or_local_key() {
 			Box::pin(async move {
 				test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-				let candidate_receipt = CandidateReceipt::default();
+				let candidate_receipt = make_valid_candidate_receipt();
 				let candidate_hash = candidate_receipt.hash();
 
 				test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -1386,7 +1300,10 @@ fn resume_dispute_without_local_statement_or_local_key() {
 				}
 
 				virtual_overseer.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-				assert!(virtual_overseer.try_recv().await.is_none());
+				assert_matches!(
+					virtual_overseer.try_recv().await,
+					None => {}
+				);
 
 				test_state
 			})
@@ -1417,7 +1334,7 @@ fn resume_dispute_with_local_statement_without_local_key() {
 		Box::pin(async move {
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -1505,7 +1422,7 @@ fn issue_local_statement_does_cause_distribution_but_not_duplicate_participation
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_valid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -1552,7 +1469,7 @@ fn issue_local_statement_does_cause_distribution_but_not_duplicate_participation
 				}
 			);
 
-			// Make sure we don't get a `DisputeParticiationMessage`.
+			// Make sure we won't participate:
 			assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none());
 
 			virtual_overseer.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
@@ -1571,7 +1488,7 @@ fn negative_issue_local_statement_only_triggers_import() {
 
 			test_state.handle_resume_sync(&mut virtual_overseer, session).await;
 
-			let candidate_receipt = CandidateReceipt::default();
+			let candidate_receipt = make_invalid_candidate_receipt();
 			let candidate_hash = candidate_receipt.hash();
 
 			test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await;
@@ -1596,7 +1513,7 @@ fn negative_issue_local_statement_only_triggers_import() {
 			let disputes = backend.load_recent_disputes().unwrap();
 			assert_eq!(disputes, None);
 
-			// Assert that subsystem is not sending Participation messages:
+			// Assert that subsystem is not participating.
 			assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none());
 
 			virtual_overseer.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
diff --git a/polkadot/node/core/dispute-participation/Cargo.toml b/polkadot/node/core/dispute-participation/Cargo.toml
deleted file mode 100644
index 98a11599ad3..00000000000
--- a/polkadot/node/core/dispute-participation/Cargo.toml
+++ /dev/null
@@ -1,20 +0,0 @@
-[package]
-name = "polkadot-node-core-dispute-participation"
-version = "0.9.13"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
-
-[dependencies]
-futures = "0.3.17"
-thiserror = "1.0.30"
-tracing = "0.1.29"
-
-polkadot-node-primitives = { path = "../../primitives" }
-polkadot-node-subsystem = { path = "../../subsystem" }
-polkadot-primitives = { path = "../../../primitives" }
-
-[dev-dependencies]
-assert_matches = "1.5.0"
-parity-scale-codec = "2.3.1"
-polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers"}
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/node/core/dispute-participation/src/lib.rs b/polkadot/node/core/dispute-participation/src/lib.rs
deleted file mode 100644
index 21258ad5be1..00000000000
--- a/polkadot/node/core/dispute-participation/src/lib.rs
+++ /dev/null
@@ -1,372 +0,0 @@
-// Copyright 2021 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
-
-//! Implements the dispute participation subsystem.
-//!
-//! This subsystem is responsible for actually participating in disputes: when
-//! notified of a dispute, we recover the candidate data, validate the
-//! candidate, and cast our vote in the dispute.
-
-use futures::{channel::oneshot, prelude::*};
-
-use polkadot_node_primitives::{ValidationResult, APPROVAL_EXECUTION_TIMEOUT};
-use polkadot_node_subsystem::{
-	errors::{RecoveryError, RuntimeApiError},
-	messages::{
-		AvailabilityRecoveryMessage, AvailabilityStoreMessage, CandidateValidationMessage,
-		DisputeCoordinatorMessage, DisputeParticipationMessage, RuntimeApiMessage,
-		RuntimeApiRequest,
-	},
-	overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
-	SubsystemError,
-};
-use polkadot_primitives::v1::{BlockNumber, CandidateHash, CandidateReceipt, Hash, SessionIndex};
-
-#[cfg(test)]
-mod tests;
-
-const LOG_TARGET: &str = "parachain::dispute-participation";
-
-struct State {
-	recent_block: Option<(BlockNumber, Hash)>,
-}
-
-/// An implementation of the dispute participation subsystem.
-pub struct DisputeParticipationSubsystem;
-
-impl DisputeParticipationSubsystem {
-	/// Create a new instance of the subsystem.
-	pub fn new() -> Self {
-		DisputeParticipationSubsystem
-	}
-}
-
-impl<Context> overseer::Subsystem<Context, SubsystemError> for DisputeParticipationSubsystem
-where
-	Context: SubsystemContext<Message = DisputeParticipationMessage>,
-	Context: overseer::SubsystemContext<Message = DisputeParticipationMessage>,
-{
-	fn start(self, ctx: Context) -> SpawnedSubsystem {
-		let future = run(ctx).map(|_| Ok(())).boxed();
-
-		SpawnedSubsystem { name: "dispute-participation-subsystem", future }
-	}
-}
-
-#[derive(Debug, thiserror::Error)]
-#[allow(missing_docs)]
-pub enum Error {
-	#[error(transparent)]
-	RuntimeApi(#[from] RuntimeApiError),
-
-	#[error(transparent)]
-	Subsystem(#[from] SubsystemError),
-
-	#[error(transparent)]
-	Oneshot(#[from] oneshot::Canceled),
-
-	#[error("Oneshot receiver died")]
-	OneshotSendFailed,
-
-	#[error(transparent)]
-	Participation(#[from] ParticipationError),
-}
-
-#[derive(Debug, thiserror::Error)]
-pub enum ParticipationError {
-	#[error("Missing recent block state to participate in dispute")]
-	MissingRecentBlockState,
-	#[error("Failed to recover available data for candidate {0}")]
-	MissingAvailableData(CandidateHash),
-	#[error("Failed to recover validation code for candidate {0}")]
-	MissingValidationCode(CandidateHash),
-}
-
-impl Error {
-	fn trace(&self) {
-		match self {
-			// don't spam the log with spurious errors
-			Self::RuntimeApi(_) | Self::Oneshot(_) => {
-				tracing::debug!(target: LOG_TARGET, err = ?self)
-			},
-			// it's worth reporting otherwise
-			_ => tracing::warn!(target: LOG_TARGET, err = ?self),
-		}
-	}
-}
-
-async fn run<Context>(mut ctx: Context)
-where
-	Context: SubsystemContext<Message = DisputeParticipationMessage>,
-	Context: overseer::SubsystemContext<Message = DisputeParticipationMessage>,
-{
-	let mut state = State { recent_block: None };
-
-	loop {
-		match ctx.recv().await {
-			Err(_) => return,
-			Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => {
-				tracing::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
-				return
-			},
-			Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_, _))) => {},
-			Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves(update))) => {
-				update_state(&mut state, update);
-			},
-			Ok(FromOverseer::Communication { msg }) => {
-				if let Err(err) = handle_incoming(&mut ctx, &mut state, msg).await {
-					err.trace();
-					if let Error::Subsystem(SubsystemError::Context(_)) = err {
-						return
-					}
-				}
-			},
-		}
-	}
-}
-
-fn update_state(state: &mut State, update: ActiveLeavesUpdate) {
-	for active in update.activated {
-		if state.recent_block.map_or(true, |s| active.number > s.0) {
-			state.recent_block = Some((active.number, active.hash));
-		}
-	}
-}
-
-async fn handle_incoming(
-	ctx: &mut impl SubsystemContext,
-	state: &mut State,
-	message: DisputeParticipationMessage,
-) -> Result<(), Error> {
-	match message {
-		DisputeParticipationMessage::Participate {
-			candidate_hash,
-			candidate_receipt,
-			session,
-			n_validators,
-			report_availability,
-		} =>
-			if let Some((_, block_hash)) = state.recent_block {
-				participate(
-					ctx,
-					block_hash,
-					candidate_hash,
-					candidate_receipt,
-					session,
-					n_validators,
-					report_availability,
-				)
-				.await
-			} else {
-				return Err(ParticipationError::MissingRecentBlockState.into())
-			},
-	}
-}
-
-async fn participate(
-	ctx: &mut impl SubsystemContext,
-	block_hash: Hash,
-	candidate_hash: CandidateHash,
-	candidate_receipt: CandidateReceipt,
-	session: SessionIndex,
-	n_validators: u32,
-	report_availability: oneshot::Sender<bool>,
-) -> Result<(), Error> {
-	let (recover_available_data_tx, recover_available_data_rx) = oneshot::channel();
-	let (code_tx, code_rx) = oneshot::channel();
-	let (store_available_data_tx, store_available_data_rx) = oneshot::channel();
-	let (validation_tx, validation_rx) = oneshot::channel();
-
-	// in order to validate a candidate we need to start by recovering the
-	// available data
-	ctx.send_message(AvailabilityRecoveryMessage::RecoverAvailableData(
-		candidate_receipt.clone(),
-		session,
-		None,
-		recover_available_data_tx,
-	))
-	.await;
-
-	let available_data = match recover_available_data_rx.await? {
-		Ok(data) => {
-			report_availability.send(true).map_err(|_| Error::OneshotSendFailed)?;
-			data
-		},
-		Err(RecoveryError::Invalid) => {
-			report_availability.send(true).map_err(|_| Error::OneshotSendFailed)?;
-
-			// the available data was recovered but it is invalid, therefore we'll
-			// vote negatively for the candidate dispute
-			cast_invalid_vote(ctx, candidate_hash, candidate_receipt, session).await;
-			return Ok(())
-		},
-		Err(RecoveryError::Unavailable) => {
-			report_availability.send(false).map_err(|_| Error::OneshotSendFailed)?;
-
-			return Err(ParticipationError::MissingAvailableData(candidate_hash).into())
-		},
-	};
-
-	// we also need to fetch the validation code which we can reference by its
-	// hash as taken from the candidate descriptor
-	ctx.send_message(RuntimeApiMessage::Request(
-		block_hash,
-		RuntimeApiRequest::ValidationCodeByHash(
-			candidate_receipt.descriptor.validation_code_hash,
-			code_tx,
-		),
-	))
-	.await;
-
-	let validation_code = match code_rx.await?? {
-		Some(code) => code,
-		None => {
-			tracing::warn!(
-				target: LOG_TARGET,
-				"Validation code unavailable for code hash {:?} in the state of block {:?}",
-				candidate_receipt.descriptor.validation_code_hash,
-				block_hash,
-			);
-
-			return Err(ParticipationError::MissingValidationCode(candidate_hash).into())
-		},
-	};
-
-	// we dispatch a request to store the available data for the candidate. we
-	// want to maximize data availability for other potential checkers involved
-	// in the dispute
-	ctx.send_message(AvailabilityStoreMessage::StoreAvailableData {
-		candidate_hash,
-		n_validators,
-		available_data: available_data.clone(),
-		tx: store_available_data_tx,
-	})
-	.await;
-
-	match store_available_data_rx.await? {
-		Err(_) => {
-			tracing::warn!(
-				target: LOG_TARGET,
-				"Failed to store available data for candidate {:?}",
-				candidate_hash,
-			);
-		},
-		Ok(()) => {},
-	}
-
-	// we issue a request to validate the candidate with the provided exhaustive
-	// parameters
-	//
-	// We use the approval execution timeout because this is intended to
-	// be run outside of backing and therefore should be subject to the
-	// same level of leeway.
-	ctx.send_message(CandidateValidationMessage::ValidateFromExhaustive(
-		available_data.validation_data,
-		validation_code,
-		candidate_receipt.descriptor.clone(),
-		available_data.pov,
-		APPROVAL_EXECUTION_TIMEOUT,
-		validation_tx,
-	))
-	.await;
-
-	// we cast votes (either positive or negative) depending on the outcome of
-	// the validation and if valid, whether the commitments hash matches
-	match validation_rx.await? {
-		Err(err) => {
-			tracing::warn!(
-				target: LOG_TARGET,
-				"Candidate {:?} validation failed with: {:?}",
-				candidate_receipt.hash(),
-				err,
-			);
-
-			cast_invalid_vote(ctx, candidate_hash, candidate_receipt, session).await;
-		},
-		Ok(ValidationResult::Invalid(invalid)) => {
-			tracing::warn!(
-				target: LOG_TARGET,
-				"Candidate {:?} considered invalid: {:?}",
-				candidate_hash,
-				invalid,
-			);
-
-			cast_invalid_vote(ctx, candidate_hash, candidate_receipt, session).await;
-		},
-		Ok(ValidationResult::Valid(commitments, _)) => {
-			if commitments.hash() != candidate_receipt.commitments_hash {
-				tracing::warn!(
-					target: LOG_TARGET,
-					expected = ?candidate_receipt.commitments_hash,
-					got = ?commitments.hash(),
-					"Candidate is valid but commitments hash doesn't match",
-				);
-
-				cast_invalid_vote(ctx, candidate_hash, candidate_receipt, session).await;
-			} else {
-				cast_valid_vote(ctx, candidate_hash, candidate_receipt, session).await;
-			}
-		},
-	}
-
-	Ok(())
-}
-
-async fn cast_valid_vote(
-	ctx: &mut impl SubsystemContext,
-	candidate_hash: CandidateHash,
-	candidate_receipt: CandidateReceipt,
-	session: SessionIndex,
-) {
-	tracing::info!(
-		target: LOG_TARGET,
-		"Casting valid vote in dispute for candidate {:?}",
-		candidate_hash,
-	);
-
-	issue_local_statement(ctx, candidate_hash, candidate_receipt, session, true).await;
-}
-
-async fn cast_invalid_vote(
-	ctx: &mut impl SubsystemContext,
-	candidate_hash: CandidateHash,
-	candidate_receipt: CandidateReceipt,
-	session: SessionIndex,
-) {
-	tracing::info!(
-		target: LOG_TARGET,
-		"Casting invalid vote in dispute for candidate {:?}",
-		candidate_hash,
-	);
-
-	issue_local_statement(ctx, candidate_hash, candidate_receipt, session, false).await;
-}
-
-async fn issue_local_statement(
-	ctx: &mut impl SubsystemContext,
-	candidate_hash: CandidateHash,
-	candidate_receipt: CandidateReceipt,
-	session: SessionIndex,
-	valid: bool,
-) {
-	ctx.send_message(DisputeCoordinatorMessage::IssueLocalStatement(
-		session,
-		candidate_hash,
-		candidate_receipt,
-		valid,
-	))
-	.await
-}
diff --git a/polkadot/node/core/dispute-participation/src/tests.rs b/polkadot/node/core/dispute-participation/src/tests.rs
deleted file mode 100644
index 513f673f81a..00000000000
--- a/polkadot/node/core/dispute-participation/src/tests.rs
+++ /dev/null
@@ -1,432 +0,0 @@
-// Copyright 2021 Parity Technologies (UK) Ltd.
-// This file is part of Polkadot.
-
-// Polkadot is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Polkadot is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
-
-use assert_matches::assert_matches;
-use futures::future::{self, BoxFuture};
-use std::sync::Arc;
-
-use sp_core::testing::TaskExecutor;
-
-use super::*;
-use parity_scale_codec::Encode;
-use polkadot_node_primitives::{AvailableData, BlockData, InvalidCandidate, PoV};
-use polkadot_node_subsystem::{
-	jaeger,
-	messages::{AllMessages, ValidationFailed},
-	overseer::Subsystem,
-	ActivatedLeaf, ActiveLeavesUpdate, LeafStatus,
-};
-use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle};
-use polkadot_primitives::v1::{BlakeTwo256, CandidateCommitments, HashT, Header, ValidationCode};
-
-type VirtualOverseer = TestSubsystemContextHandle<DisputeParticipationMessage>;
-
-fn test_harness<F>(test: F)
-where
-	F: FnOnce(VirtualOverseer) -> BoxFuture<'static, VirtualOverseer>,
-{
-	let (ctx, ctx_handle) = make_subsystem_context(TaskExecutor::new());
-
-	let subsystem = DisputeParticipationSubsystem::new();
-	let spawned_subsystem = subsystem.start(ctx);
-	let test_future = test(ctx_handle);
-
-	let (subsystem_result, _) =
-		futures::executor::block_on(future::join(spawned_subsystem.future, async move {
-			let mut ctx_handle = test_future.await;
-			ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await;
-
-			// no further request is received by the overseer which means that
-			// no further attempt to participate was made
-			assert!(ctx_handle.try_recv().await.is_none());
-		}));
-
-	subsystem_result.unwrap();
-}
-
-async fn activate_leaf(virtual_overseer: &mut VirtualOverseer, block_number: BlockNumber) {
-	let block_header = Header {
-		parent_hash: BlakeTwo256::hash(&block_number.encode()),
-		number: block_number,
-		digest: Default::default(),
-		state_root: Default::default(),
-		extrinsics_root: Default::default(),
-	};
-
-	let block_hash = block_header.hash();
-
-	virtual_overseer
-		.send(FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(
-			ActivatedLeaf {
-				hash: block_hash,
-				span: Arc::new(jaeger::Span::Disabled),
-				number: block_number,
-				status: LeafStatus::Fresh,
-			},
-		))))
-		.await;
-}
-
-async fn participate(virtual_overseer: &mut VirtualOverseer) -> oneshot::Receiver<bool> {
-	let commitments = CandidateCommitments::default();
-	let candidate_receipt = {
-		let mut receipt = CandidateReceipt::default();
-		receipt.commitments_hash = commitments.hash();
-		receipt
-	};
-	let candidate_hash = candidate_receipt.hash();
-	let session = 1;
-	let n_validators = 10;
-
-	let (report_availability, receive_availability) = oneshot::channel();
-
-	virtual_overseer
-		.send(FromOverseer::Communication {
-			msg: DisputeParticipationMessage::Participate {
-				candidate_hash,
-				candidate_receipt: candidate_receipt.clone(),
-				session,
-				n_validators,
-				report_availability,
-			},
-		})
-		.await;
-	receive_availability
-}
-
-async fn recover_available_data(
-	virtual_overseer: &mut VirtualOverseer,
-	receive_availability: oneshot::Receiver<bool>,
-) {
-	let pov_block = PoV { block_data: BlockData(Vec::new()) };
-
-	let available_data =
-		AvailableData { pov: Arc::new(pov_block), validation_data: Default::default() };
-
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::AvailabilityRecovery(
-			AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
-		) => {
-			tx.send(Ok(available_data)).unwrap();
-		},
-		"overseer did not receive recover available data message",
-	);
-
-	assert_eq!(receive_availability.await.expect("Availability should get reported"), true);
-}
-
-async fn fetch_validation_code(virtual_overseer: &mut VirtualOverseer) {
-	let validation_code = ValidationCode(Vec::new());
-
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-			_,
-			RuntimeApiRequest::ValidationCodeByHash(
-				_,
-				tx,
-			)
-		)) => {
-			tx.send(Ok(Some(validation_code))).unwrap();
-		},
-		"overseer did not receive runtime API request for validation code",
-	);
-}
-
-async fn store_available_data(virtual_overseer: &mut VirtualOverseer, success: bool) {
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { tx, .. }) => {
-			if success {
-				tx.send(Ok(())).unwrap();
-			} else {
-				tx.send(Err(())).unwrap();
-			}
-		},
-		"overseer did not receive store available data request",
-	);
-}
-
-#[test]
-fn cannot_participate_when_recent_block_state_is_missing() {
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			let _ = participate(&mut virtual_overseer).await;
-
-			virtual_overseer
-		})
-	});
-
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			activate_leaf(&mut virtual_overseer, 10).await;
-			let _ = participate(&mut virtual_overseer).await;
-
-			// after activating at least one leaf the recent block
-			// state should be available which should lead to trying
-			// to participate by first trying to recover the available
-			// data
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::AvailabilityRecovery(
-					AvailabilityRecoveryMessage::RecoverAvailableData(..)
-				),
-				"overseer did not receive recover available data message",
-			);
-
-			virtual_overseer
-		})
-	});
-}
-
-#[test]
-fn cannot_participate_if_cannot_recover_available_data() {
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			activate_leaf(&mut virtual_overseer, 10).await;
-			let receive_availability = participate(&mut virtual_overseer).await;
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::AvailabilityRecovery(
-					AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
-				) => {
-					tx.send(Err(RecoveryError::Unavailable)).unwrap();
-				},
-				"overseer did not receive recover available data message",
-			);
-
-			assert_eq!(
-				receive_availability.await.expect("Availability should get reported"),
-				false
-			);
-
-			virtual_overseer
-		})
-	});
-}
-
-#[test]
-fn cannot_participate_if_cannot_recover_validation_code() {
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			activate_leaf(&mut virtual_overseer, 10).await;
-			let receive_availability = participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer, receive_availability).await;
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-					_,
-					RuntimeApiRequest::ValidationCodeByHash(
-						_,
-						tx,
-					)
-				)) => {
-					tx.send(Ok(None)).unwrap();
-				},
-				"overseer did not receive runtime API request for validation code",
-			);
-
-			virtual_overseer
-		})
-	});
-}
-
-#[test]
-fn cast_invalid_vote_if_available_data_is_invalid() {
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			activate_leaf(&mut virtual_overseer, 10).await;
-			let receive_availability = participate(&mut virtual_overseer).await;
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::AvailabilityRecovery(
-					AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, tx)
-				) => {
-					tx.send(Err(RecoveryError::Invalid)).unwrap();
-				},
-				"overseer did not receive recover available data message",
-			);
-
-			assert_eq!(receive_availability.await.expect("Availability should get reported"), true);
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::IssueLocalStatement(
-					_,
-					_,
-					_,
-					false,
-				)),
-				"overseer did not receive issue local statement message",
-			);
-
-			virtual_overseer
-		})
-	});
-}
-
-#[test]
-fn cast_invalid_vote_if_validation_fails_or_is_invalid() {
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			activate_leaf(&mut virtual_overseer, 10).await;
-			let receive_availability = participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer, receive_availability).await;
-			fetch_validation_code(&mut virtual_overseer).await;
-			store_available_data(&mut virtual_overseer, true).await;
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::CandidateValidation(
-					CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
-				) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
-					tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap();
-				},
-				"overseer did not receive candidate validation message",
-			);
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::IssueLocalStatement(
-					_,
-					_,
-					_,
-					false,
-				)),
-				"overseer did not receive issue local statement message",
-			);
-
-			virtual_overseer
-		})
-	});
-}
-
-#[test]
-fn cast_invalid_vote_if_validation_passes_but_commitments_dont_match() {
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			activate_leaf(&mut virtual_overseer, 10).await;
-			let receive_availability = participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer, receive_availability).await;
-			fetch_validation_code(&mut virtual_overseer).await;
-			store_available_data(&mut virtual_overseer, true).await;
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::CandidateValidation(
-					CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
-				) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
-					let mut commitments = CandidateCommitments::default();
-					// this should lead to a commitments hash mismatch
-					commitments.processed_downward_messages = 42;
-
-					tx.send(Ok(ValidationResult::Valid(commitments, Default::default()))).unwrap();
-				},
-				"overseer did not receive candidate validation message",
-			);
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::IssueLocalStatement(
-					_,
-					_,
-					_,
-					false,
-				)),
-				"overseer did not receive issue local statement message",
-			);
-
-			virtual_overseer
-		})
-	});
-}
-
-#[test]
-fn cast_valid_vote_if_validation_passes() {
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			activate_leaf(&mut virtual_overseer, 10).await;
-			let receive_availability = participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer, receive_availability).await;
-			fetch_validation_code(&mut virtual_overseer).await;
-			store_available_data(&mut virtual_overseer, true).await;
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::CandidateValidation(
-					CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
-				) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
-					tx.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))).unwrap();
-				},
-				"overseer did not receive candidate validation message",
-			);
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::IssueLocalStatement(
-					_,
-					_,
-					_,
-					true,
-				)),
-				"overseer did not receive issue local statement message",
-			);
-
-			virtual_overseer
-		})
-	});
-}
-
-#[test]
-fn failure_to_store_available_data_does_not_preclude_participation() {
-	test_harness(|mut virtual_overseer| {
-		Box::pin(async move {
-			activate_leaf(&mut virtual_overseer, 10).await;
-			let receive_availability = participate(&mut virtual_overseer).await;
-			recover_available_data(&mut virtual_overseer, receive_availability).await;
-			fetch_validation_code(&mut virtual_overseer).await;
-			// the store available data request should fail
-			store_available_data(&mut virtual_overseer, false).await;
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::CandidateValidation(
-					CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, timeout, tx)
-				) if timeout == APPROVAL_EXECUTION_TIMEOUT => {
-					tx.send(Err(ValidationFailed("fail".to_string()))).unwrap();
-				},
-				"overseer did not receive candidate validation message",
-			);
-
-			assert_matches!(
-				virtual_overseer.recv().await,
-				AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::IssueLocalStatement(
-					_,
-					_,
-					_,
-					false,
-				)),
-				"overseer did not receive issue local statement message",
-			);
-
-			virtual_overseer
-		})
-	});
-}
diff --git a/polkadot/node/network/availability-distribution/src/error.rs b/polkadot/node/network/availability-distribution/src/error.rs
index d9db0ec42fa..d3ff182b33a 100644
--- a/polkadot/node/network/availability-distribution/src/error.rs
+++ b/polkadot/node/network/availability-distribution/src/error.rs
@@ -49,18 +49,18 @@ impl From<runtime::Error> for Error {
 #[derive(Debug, Error)]
 pub enum Fatal {
 	/// Spawning a running task failed.
-	#[error("Spawning subsystem task failed")]
+	#[error("Spawning subsystem task failed: {0}")]
 	SpawnTask(#[source] SubsystemError),
 
 	/// Requester stream exhausted.
 	#[error("Erasure chunk requester stream exhausted")]
 	RequesterExhausted,
 
-	#[error("Receive channel closed")]
+	#[error("Receive channel closed: {0}")]
 	IncomingMessageChannel(#[source] SubsystemError),
 
 	/// Errors coming from runtime::Runtime.
-	#[error("Error while accessing runtime information")]
+	#[error("Error while accessing runtime information: {0}")]
 	Runtime(#[from] runtime::Fatal),
 }
 
@@ -84,7 +84,7 @@ pub enum NonFatal {
 	SendResponse,
 
 	/// Fetching PoV failed with `RequestError`.
-	#[error("FetchPoV request error")]
+	#[error("FetchPoV request error: {0}")]
 	FetchPoV(#[source] RequestError),
 
 	/// Fetching PoV failed as the received PoV did not match the expected hash.
@@ -99,7 +99,7 @@ pub enum NonFatal {
 	InvalidValidatorIndex,
 
 	/// Errors coming from runtime::Runtime.
-	#[error("Error while accessing runtime information")]
+	#[error("Error while accessing runtime information: {0}")]
 	Runtime(#[from] runtime::NonFatal),
 }
 
diff --git a/polkadot/node/network/bridge/src/tests.rs b/polkadot/node/network/bridge/src/tests.rs
index ec83d223352..e3bfa0a32ce 100644
--- a/polkadot/node/network/bridge/src/tests.rs
+++ b/polkadot/node/network/bridge/src/tests.rs
@@ -1225,8 +1225,6 @@ fn spread_event_to_subsystems_is_up_to_date() {
 				cnt += 1;
 			},
 			AllMessages::DisputeCoordinator(_) => unreachable!("Not interested in network events"),
-			AllMessages::DisputeParticipation(_) =>
-				unreachable!("Not interested in network events"),
 			AllMessages::DisputeDistribution(_) => unreachable!("Not interested in network events"),
 			AllMessages::ChainSelection(_) => unreachable!("Not interested in network events"),
 			// Add variants here as needed, `{ cnt += 1; }` for those that need to be
diff --git a/polkadot/node/network/dispute-distribution/src/lib.rs b/polkadot/node/network/dispute-distribution/src/lib.rs
index d1890f4c6b2..9b4a7a84c94 100644
--- a/polkadot/node/network/dispute-distribution/src/lib.rs
+++ b/polkadot/node/network/dispute-distribution/src/lib.rs
@@ -145,7 +145,7 @@ where
 	) -> Self {
 		let runtime = RuntimeInfo::new_with_config(runtime::Config {
 			keystore: Some(keystore),
-			session_cache_lru_size: DISPUTE_WINDOW as usize,
+			session_cache_lru_size: DISPUTE_WINDOW.get() as usize,
 		});
 		let (tx, sender_rx) = mpsc::channel(1);
 		let disputes_sender = DisputeSender::new(tx, metrics.clone());
diff --git a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
index 32e5a686b4d..03d242c7199 100644
--- a/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
+++ b/polkadot/node/network/dispute-distribution/src/receiver/mod.rs
@@ -145,7 +145,7 @@ where
 	) -> Self {
 		let runtime = RuntimeInfo::new_with_config(runtime::Config {
 			keystore: None,
-			session_cache_lru_size: DISPUTE_WINDOW as usize,
+			session_cache_lru_size: DISPUTE_WINDOW.get() as usize,
 		});
 		Self {
 			runtime,
diff --git a/polkadot/node/overseer/src/dummy.rs b/polkadot/node/overseer/src/dummy.rs
index 09b594867fc..bc93ffa064f 100644
--- a/polkadot/node/overseer/src/dummy.rs
+++ b/polkadot/node/overseer/src/dummy.rs
@@ -89,7 +89,6 @@ pub fn dummy_overseer_builder<'a, Spawner, SupportsParachains>(
 		DummySubsystem,
 		DummySubsystem,
 		DummySubsystem,
-		DummySubsystem,
 	>,
 	SubsystemError,
 >
@@ -130,7 +129,6 @@ pub fn one_for_all_overseer_builder<'a, Spawner, SupportsParachains, Sub>(
 		Sub,
 		Sub,
 		Sub,
-		Sub,
 	>,
 	SubsystemError,
 >
@@ -156,7 +154,6 @@ where
 		+ Subsystem<OverseerSubsystemContext<ApprovalVotingMessage>, SubsystemError>
 		+ Subsystem<OverseerSubsystemContext<GossipSupportMessage>, SubsystemError>
 		+ Subsystem<OverseerSubsystemContext<DisputeCoordinatorMessage>, SubsystemError>
-		+ Subsystem<OverseerSubsystemContext<DisputeParticipationMessage>, SubsystemError>
 		+ Subsystem<OverseerSubsystemContext<DisputeDistributionMessage>, SubsystemError>
 		+ Subsystem<OverseerSubsystemContext<ChainSelectionMessage>, SubsystemError>,
 {
@@ -181,7 +178,6 @@ where
 		.approval_voting(subsystem.clone())
 		.gossip_support(subsystem.clone())
 		.dispute_coordinator(subsystem.clone())
-		.dispute_participation(subsystem.clone())
 		.dispute_distribution(subsystem.clone())
 		.chain_selection(subsystem)
 		.activation_external_listeners(Default::default())
diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs
index cd64539dbaf..9321fad7a42 100644
--- a/polkadot/node/overseer/src/lib.rs
+++ b/polkadot/node/overseer/src/lib.rs
@@ -80,9 +80,9 @@ use polkadot_node_subsystem_types::messages::{
 	AvailabilityRecoveryMessage, AvailabilityStoreMessage, BitfieldDistributionMessage,
 	BitfieldSigningMessage, CandidateBackingMessage, CandidateValidationMessage, ChainApiMessage,
 	ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage,
-	DisputeCoordinatorMessage, DisputeDistributionMessage, DisputeParticipationMessage,
-	GossipSupportMessage, NetworkBridgeEvent, NetworkBridgeMessage, ProvisionerMessage,
-	RuntimeApiMessage, StatementDistributionMessage,
+	DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage,
+	NetworkBridgeEvent, NetworkBridgeMessage, ProvisionerMessage, RuntimeApiMessage,
+	StatementDistributionMessage,
 };
 pub use polkadot_node_subsystem_types::{
 	errors::{SubsystemError, SubsystemResult},
@@ -462,9 +462,6 @@ pub struct Overseer<SupportsParachains> {
 	#[subsystem(no_dispatch, DisputeCoordinatorMessage)]
 	dispute_coordinator: DisputeCoordinator,
 
-	#[subsystem(no_dispatch, DisputeParticipationMessage)]
-	dispute_participation: DisputeParticipation,
-
 	#[subsystem(no_dispatch, DisputeDistributionMessage)]
 	dispute_distribution: DisputeDistribution,
 
diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs
index 9d4fb116ab4..7470028fed9 100644
--- a/polkadot/node/overseer/src/tests.rs
+++ b/polkadot/node/overseer/src/tests.rs
@@ -888,17 +888,6 @@ fn test_dispute_coordinator_msg() -> DisputeCoordinatorMessage {
 	DisputeCoordinatorMessage::RecentDisputes(sender)
 }
 
-fn test_dispute_participation_msg() -> DisputeParticipationMessage {
-	let (sender, _) = oneshot::channel();
-	DisputeParticipationMessage::Participate {
-		candidate_hash: Default::default(),
-		candidate_receipt: Default::default(),
-		session: 0,
-		n_validators: 0,
-		report_availability: sender,
-	}
-}
-
 fn test_dispute_distribution_msg() -> DisputeDistributionMessage {
 	let dummy_dispute_message = UncheckedDisputeMessage {
 		candidate_receipt: Default::default(),
@@ -930,7 +919,7 @@ fn test_chain_selection_msg() -> ChainSelectionMessage {
 // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly.
 #[test]
 fn overseer_all_subsystems_receive_signals_and_messages() {
-	const NUM_SUBSYSTEMS: usize = 21;
+	const NUM_SUBSYSTEMS: usize = 20;
 	// -3 for BitfieldSigning, GossipSupport and AvailabilityDistribution
 	const NUM_SUBSYSTEMS_MESSAGED: usize = NUM_SUBSYSTEMS - 3;
 
@@ -1009,9 +998,6 @@ fn overseer_all_subsystems_receive_signals_and_messages() {
 		handle
 			.send_msg_anon(AllMessages::DisputeCoordinator(test_dispute_coordinator_msg()))
 			.await;
-		handle
-			.send_msg_anon(AllMessages::DisputeParticipation(test_dispute_participation_msg()))
-			.await;
 		handle
 			.send_msg_anon(AllMessages::DisputeDistribution(test_dispute_distribution_msg()))
 			.await;
@@ -1069,7 +1055,6 @@ fn context_holds_onto_message_until_enough_signals_received() {
 	let (approval_voting_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
 	let (gossip_support_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
 	let (dispute_coordinator_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
-	let (dispute_participation_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
 	let (dispute_distribution_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
 	let (chain_selection_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
 
@@ -1091,7 +1076,6 @@ fn context_holds_onto_message_until_enough_signals_received() {
 	let (approval_voting_unbounded_tx, _) = metered::unbounded();
 	let (gossip_support_unbounded_tx, _) = metered::unbounded();
 	let (dispute_coordinator_unbounded_tx, _) = metered::unbounded();
-	let (dispute_participation_unbounded_tx, _) = metered::unbounded();
 	let (dispute_distribution_unbounded_tx, _) = metered::unbounded();
 	let (chain_selection_unbounded_tx, _) = metered::unbounded();
 
@@ -1114,7 +1098,6 @@ fn context_holds_onto_message_until_enough_signals_received() {
 		approval_voting: approval_voting_bounded_tx.clone(),
 		gossip_support: gossip_support_bounded_tx.clone(),
 		dispute_coordinator: dispute_coordinator_bounded_tx.clone(),
-		dispute_participation: dispute_participation_bounded_tx.clone(),
 		dispute_distribution: dispute_distribution_bounded_tx.clone(),
 		chain_selection: chain_selection_bounded_tx.clone(),
 
@@ -1136,7 +1119,6 @@ fn context_holds_onto_message_until_enough_signals_received() {
 		approval_voting_unbounded: approval_voting_unbounded_tx.clone(),
 		gossip_support_unbounded: gossip_support_unbounded_tx.clone(),
 		dispute_coordinator_unbounded: dispute_coordinator_unbounded_tx.clone(),
-		dispute_participation_unbounded: dispute_participation_unbounded_tx.clone(),
 		dispute_distribution_unbounded: dispute_distribution_unbounded_tx.clone(),
 		chain_selection_unbounded: chain_selection_unbounded_tx.clone(),
 	};
diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs
index adc8846f429..4f4c52fb39d 100644
--- a/polkadot/node/primitives/src/lib.rs
+++ b/polkadot/node/primitives/src/lib.rs
@@ -65,12 +65,6 @@ pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize;
 /// The bomb limit for decompressing PoV blobs.
 pub const POV_BOMB_LIMIT: usize = (MAX_POV_SIZE * 4u32) as usize;
 
-/// It would be nice to draw this from the chain state, but we have no tools for it right now.
-/// On Polkadot this is 1 day, and on Kusama it's 6 hours.
-///
-/// Number of sessions we want to consider in disputes.
-pub const DISPUTE_WINDOW: SessionIndex = 6;
-
 /// The amount of time to spend on execution during backing.
 pub const BACKING_EXECUTION_TIMEOUT: Duration = Duration::from_secs(2);
 
@@ -82,6 +76,59 @@ pub const BACKING_EXECUTION_TIMEOUT: Duration = Duration::from_secs(2);
 /// dispute participants.
 pub const APPROVAL_EXECUTION_TIMEOUT: Duration = Duration::from_secs(6);
 
+/// Type of a session window size.
+///
+/// We are not using `NonZeroU32` here because `expect` and `unwrap` are not yet const, so global
+/// constants of `SessionWindowSize` would require `lazy_static` in that case.
+///
+/// See: https://github.com/rust-lang/rust/issues/67441
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
+pub struct SessionWindowSize(SessionIndex);
+
+#[macro_export]
+/// Create a new checked `SessionWindowSize` which cannot be 0.
+macro_rules! new_session_window_size {
+	(0) => {
+		compile_error!("Must be non zero");
+	};
+	(0_u32) => {
+		compile_error!("Must be non zero");
+	};
+	(0 as u32) => {
+		compile_error!("Must be non zero");
+	};
+	(0 as _) => {
+		compile_error!("Must be non zero");
+	};
+	($l:literal) => {
+		SessionWindowSize::unchecked_new($l as _)
+	};
+}
+
+/// Number of sessions we want to consider in disputes.
+///
+/// It would be nice to draw this from the chain state, but we have no tools for it right now.
+/// On Polkadot this is 1 day, and on Kusama it's 6 hours.
+pub const DISPUTE_WINDOW: SessionWindowSize = new_session_window_size!(6);
+
+impl SessionWindowSize {
+	/// Get the value as `SessionIndex` for doing comparisons with those.
+	pub fn get(self) -> SessionIndex {
+		self.0
+	}
+
+	/// Helper function for `new_session_window_size`.
+	///
+	/// Don't use it. The only reason it is public is that otherwise the
+	/// `new_session_window_size` macro would not work outside of this module.
+	#[doc(hidden)]
+	pub const fn unchecked_new(size: SessionIndex) -> Self {
+		Self(size)
+	}
+}
+
 /// The cumulative weight of a block in a fork-choice rule.
 pub type BlockWeight = u32;
 
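For illustration, a minimal usage sketch of the new type and macro (assuming the constant is consumed from another crate via `polkadot_node_primitives`, as done elsewhere in this patch):

```rust
use polkadot_node_primitives::{new_session_window_size, SessionWindowSize};

// Checked at compile time: `new_session_window_size!(0)` would hit the
// macro's `compile_error!("Must be non zero")` arm.
const MY_WINDOW: SessionWindowSize = new_session_window_size!(4);

fn main() {
	// `.get()` yields the underlying `SessionIndex` for comparisons and arithmetic.
	assert_eq!(MY_WINDOW.get(), 4);
}
```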
diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml
index 7b21e4d99c4..b82b01a5a1f 100644
--- a/polkadot/node/service/Cargo.toml
+++ b/polkadot/node/service/Cargo.toml
@@ -108,7 +108,6 @@ polkadot-node-core-candidate-validation = { path = "../core/candidate-validation
 polkadot-node-core-chain-api = { path = "../core/chain-api", optional = true }
 polkadot-node-core-chain-selection = { path = "../core/chain-selection", optional = true }
 polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator", optional = true }
-polkadot-node-core-dispute-participation = { path = "../core/dispute-participation", optional = true }
 polkadot-node-core-provisioner = { path = "../core/provisioner", optional = true }
 polkadot-node-core-runtime-api = { path = "../core/runtime-api", optional = true }
 polkadot-statement-distribution = { path = "../network/statement-distribution", optional = true }
@@ -145,7 +144,6 @@ full-node = [
 	"polkadot-node-core-chain-api",
 	"polkadot-node-core-chain-selection",
 	"polkadot-node-core-dispute-coordinator",
-	"polkadot-node-core-dispute-participation",
 	"polkadot-node-core-provisioner",
 	"polkadot-node-core-runtime-api",
 	"polkadot-statement-distribution",
diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs
index 4651626d46b..08d94445df8 100644
--- a/polkadot/node/service/src/overseer.rs
+++ b/polkadot/node/service/src/overseer.rs
@@ -59,7 +59,6 @@ pub use polkadot_node_core_candidate_validation::CandidateValidationSubsystem;
 pub use polkadot_node_core_chain_api::ChainApiSubsystem;
 pub use polkadot_node_core_chain_selection::ChainSelectionSubsystem;
 pub use polkadot_node_core_dispute_coordinator::DisputeCoordinatorSubsystem;
-pub use polkadot_node_core_dispute_participation::DisputeParticipationSubsystem;
 pub use polkadot_node_core_provisioner::ProvisioningSubsystem as ProvisionerSubsystem;
 pub use polkadot_node_core_runtime_api::RuntimeApiSubsystem;
 pub use polkadot_statement_distribution::StatementDistribution as StatementDistributionSubsystem;
@@ -159,7 +158,6 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>(
 		ApprovalVotingSubsystem,
 		GossipSupportSubsystem<AuthorityDiscoveryService>,
 		DisputeCoordinatorSubsystem,
-		DisputeParticipationSubsystem,
 		DisputeDistributionSubsystem<AuthorityDiscoveryService>,
 		ChainSelectionSubsystem,
 	>,
@@ -259,7 +257,6 @@ where
 			keystore.clone(),
 			Metrics::register(registry)?,
 		))
-		.dispute_participation(DisputeParticipationSubsystem::new())
 		.dispute_distribution(DisputeDistributionSubsystem::new(
 			keystore.clone(),
 			dispute_req_receiver,
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index 75b9b3f9d33..48baf44aac4 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -275,25 +275,6 @@ pub enum ImportStatementsResult {
 	ValidImport,
 }
 
-/// Messages received by the dispute participation subsystem.
-#[derive(Debug)]
-pub enum DisputeParticipationMessage {
-	/// Validate a candidate for the purposes of participating in a dispute.
-	Participate {
-		/// The hash of the candidate
-		candidate_hash: CandidateHash,
-		/// The candidate receipt itself.
-		candidate_receipt: CandidateReceipt,
-		/// The session the candidate appears in.
-		session: SessionIndex,
-		/// The number of validators in the session.
-		n_validators: u32,
-		/// Give immediate feedback on whether the candidate was available or
-		/// not.
-		report_availability: oneshot::Sender<bool>,
-	},
-}
-
 /// Messages going to the dispute distribution subsystem.
 #[derive(Debug)]
 pub enum DisputeDistributionMessage {
diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml
index 00fecf5817e..7e0b1ccb8c5 100644
--- a/polkadot/node/subsystem-util/Cargo.toml
+++ b/polkadot/node/subsystem-util/Cargo.toml
@@ -22,6 +22,7 @@ polkadot-node-jaeger = { path = "../jaeger" }
 polkadot-node-metrics = { path = "../metrics" }
 polkadot-node-network-protocol = { path = "../network/protocol" }
 polkadot-primitives = { path = "../../primitives" }
+polkadot-node-primitives = { path = "../primitives" }
 polkadot-overseer = { path = "../overseer" }
 metered-channel = { path = "../metered-channel" }
 
@@ -35,3 +36,4 @@ env_logger = "0.9.0"
 futures = { version = "0.3.17", features = ["thread-pool"] }
 log = "0.4.13"
 polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
+lazy_static = "1.4.0"
diff --git a/polkadot/node/subsystem-util/src/lib.rs b/polkadot/node/subsystem-util/src/lib.rs
index bf3652a0445..4b400d8f4c1 100644
--- a/polkadot/node/subsystem-util/src/lib.rs
+++ b/polkadot/node/subsystem-util/src/lib.rs
@@ -209,6 +209,7 @@ specialize_requests! {
 	fn request_assumed_validation_data(para_id: ParaId, expected_persisted_validation_data_hash: Hash) -> Option<(PersistedValidationData, ValidationCodeHash)>; AssumedValidationData;
 	fn request_session_index_for_child() -> SessionIndex; SessionIndexForChild;
 	fn request_validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option<ValidationCode>; ValidationCode;
+	fn request_validation_code_by_hash(validation_code_hash: ValidationCodeHash) -> Option<ValidationCode>; ValidationCodeByHash;
 	fn request_candidate_pending_availability(para_id: ParaId) -> Option<CommittedCandidateReceipt>; CandidatePendingAvailability;
 	fn request_candidate_events() -> Vec<CandidateEvent>; CandidateEvents;
 	fn request_session_info(index: SessionIndex) -> Option<SessionInfo>; SessionInfo;
diff --git a/polkadot/node/subsystem-util/src/rolling_session_window.rs b/polkadot/node/subsystem-util/src/rolling_session_window.rs
index fc2957b7ea2..2ecf51d8df8 100644
--- a/polkadot/node/subsystem-util/src/rolling_session_window.rs
+++ b/polkadot/node/subsystem-util/src/rolling_session_window.rs
@@ -19,6 +19,7 @@
 //! This is useful for consensus components which need to stay up-to-date about recent sessions but don't
 //! care about the state of particular blocks.
 
+pub use polkadot_node_primitives::{new_session_window_size, SessionWindowSize};
 use polkadot_primitives::v1::{Hash, SessionIndex, SessionInfo};
 
 use futures::channel::oneshot;
@@ -27,6 +28,7 @@ use polkadot_node_subsystem::{
 	messages::{RuntimeApiMessage, RuntimeApiRequest},
 	overseer, SubsystemContext,
 };
+use thiserror::Error;
 
 /// Sessions unavailable in state to cache.
 #[derive(Debug)]
@@ -51,7 +53,7 @@ pub struct SessionsUnavailableInfo {
 }
 
 /// Sessions were unavailable to fetch from the state for some reason.
-#[derive(Debug)]
+#[derive(Debug, Error)]
 pub struct SessionsUnavailable {
 	/// The error kind.
 	kind: SessionsUnavailableKind,
@@ -59,16 +61,15 @@ pub struct SessionsUnavailable {
 	info: Option<SessionsUnavailableInfo>,
 }
 
+impl core::fmt::Display for SessionsUnavailable {
+	fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
+		write!(f, "Sessions unavailable: {:?}, info: {:?}", self.kind, self.info)
+	}
+}
+
 /// An indicated update of the rolling session window.
 #[derive(Debug, PartialEq, Clone)]
 pub enum SessionWindowUpdate {
-	/// The session window was just initialized to the current values.
-	Initialized {
-		/// The start of the window (inclusive).
-		window_start: SessionIndex,
-		/// The end of the window (inclusive).
-		window_end: SessionIndex,
-	},
 	/// The session window was just advanced from one range to a new one.
 	Advanced {
 		/// The previous start of the window (inclusive).
@@ -85,49 +86,63 @@ pub enum SessionWindowUpdate {
 }
 
 /// A rolling window of sessions and cached session info.
-#[derive(Default)]
 pub struct RollingSessionWindow {
-	earliest_session: Option<SessionIndex>,
+	earliest_session: SessionIndex,
 	session_info: Vec<SessionInfo>,
-	window_size: SessionIndex,
+	window_size: SessionWindowSize,
 }
 
 impl RollingSessionWindow {
 	/// Initialize a new session info cache with the given window size.
-	pub fn new(window_size: SessionIndex) -> Self {
-		RollingSessionWindow { earliest_session: None, session_info: Vec::new(), window_size }
+	pub async fn new(
+		ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
+		window_size: SessionWindowSize,
+		block_hash: Hash,
+	) -> Result<Self, SessionsUnavailable> {
+		let session_index = get_session_index_for_head(ctx, block_hash).await?;
+
+		let window_start = session_index.saturating_sub(window_size.get() - 1);
+
+		match load_all_sessions(ctx, block_hash, window_start, session_index).await {
+			Err(kind) => Err(SessionsUnavailable {
+				kind,
+				info: Some(SessionsUnavailableInfo {
+					window_start,
+					window_end: session_index,
+					block_hash,
+				}),
+			}),
+			Ok(s) => Ok(Self { earliest_session: window_start, session_info: s, window_size }),
+		}
 	}
 
 	/// Initialize a new session info cache with the given window size and
 	/// initial data.
 	pub fn with_session_info(
-		window_size: SessionIndex,
+		window_size: SessionWindowSize,
 		earliest_session: SessionIndex,
 		session_info: Vec<SessionInfo>,
 	) -> Self {
-		RollingSessionWindow { earliest_session: Some(earliest_session), session_info, window_size }
+		RollingSessionWindow { earliest_session, session_info, window_size }
 	}
 
 	/// Access the session info for the given session index, if stored within the window.
 	pub fn session_info(&self, index: SessionIndex) -> Option<&SessionInfo> {
-		self.earliest_session.and_then(|earliest| {
-			if index < earliest {
-				None
-			} else {
-				self.session_info.get((index - earliest) as usize)
-			}
-		})
+		if index < self.earliest_session {
+			None
+		} else {
+			self.session_info.get((index - self.earliest_session) as usize)
+		}
 	}
 
-	/// Access the index of the earliest session, if the window is not empty.
-	pub fn earliest_session(&self) -> Option<SessionIndex> {
-		self.earliest_session.clone()
+	/// Access the index of the earliest session.
+	pub fn earliest_session(&self) -> SessionIndex {
+		self.earliest_session
 	}
 
-	/// Access the index of the latest session, if the window is not empty.
-	pub fn latest_session(&self) -> Option<SessionIndex> {
-		self.earliest_session
-			.map(|earliest| earliest + (self.session_info.len() as SessionIndex).saturating_sub(1))
+	/// Access the index of the latest session.
+	pub fn latest_session(&self) -> SessionIndex {
+		self.earliest_session + (self.session_info.len() as SessionIndex).saturating_sub(1)
 	}
 
 	/// When inspecting a new import notification, updates the session info cache to match
@@ -142,116 +157,86 @@ impl RollingSessionWindow {
 		ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 		block_hash: Hash,
 	) -> Result<SessionWindowUpdate, SessionsUnavailable> {
-		if self.window_size == 0 {
-			return Ok(SessionWindowUpdate::Unchanged)
-		}
+		let session_index = get_session_index_for_head(ctx, block_hash).await?;
 
-		let session_index = {
-			let (s_tx, s_rx) = oneshot::channel();
-
-			// We're requesting session index of a child to populate the cache in advance.
-			ctx.send_message(RuntimeApiMessage::Request(
-				block_hash,
-				RuntimeApiRequest::SessionIndexForChild(s_tx),
-			))
-			.await;
-
-			match s_rx.await {
-				Ok(Ok(s)) => s,
-				Ok(Err(e)) =>
-					return Err(SessionsUnavailable {
-						kind: SessionsUnavailableKind::RuntimeApi(e),
-						info: None,
-					}),
-				Err(e) =>
-					return Err(SessionsUnavailable {
-						kind: SessionsUnavailableKind::RuntimeApiUnavailable(e),
-						info: None,
-					}),
-			}
-		};
+		let old_window_start = self.earliest_session;
 
-		match self.earliest_session {
-			None => {
-				// First block processed on start-up.
-
-				let window_start = session_index.saturating_sub(self.window_size - 1);
-
-				match load_all_sessions(ctx, block_hash, window_start, session_index).await {
-					Err(kind) => Err(SessionsUnavailable {
-						kind,
-						info: Some(SessionsUnavailableInfo {
-							window_start,
-							window_end: session_index,
-							block_hash,
-						}),
-					}),
-					Ok(s) => {
-						let update = SessionWindowUpdate::Initialized {
-							window_start,
-							window_end: session_index,
-						};
-
-						self.earliest_session = Some(window_start);
-						self.session_info = s;
-
-						Ok(update)
-					},
-				}
-			},
-			Some(old_window_start) => {
-				let latest =
-					self.latest_session().expect("latest always exists if earliest does; qed");
+		let latest = self.latest_session();
 
-				// Either cached or ancient.
-				if session_index <= latest {
-					return Ok(SessionWindowUpdate::Unchanged)
-				}
+		// Either cached or ancient.
+		if session_index <= latest {
+			return Ok(SessionWindowUpdate::Unchanged)
+		}
 
-				let old_window_end = latest;
-
-				let window_start = session_index.saturating_sub(self.window_size - 1);
-
-				// keep some of the old window, if applicable.
-				let overlap_start = window_start.saturating_sub(old_window_start);
-
-				let fresh_start = if latest < window_start { window_start } else { latest + 1 };
-
-				match load_all_sessions(ctx, block_hash, fresh_start, session_index).await {
-					Err(kind) => Err(SessionsUnavailable {
-						kind,
-						info: Some(SessionsUnavailableInfo {
-							window_start: fresh_start,
-							window_end: session_index,
-							block_hash,
-						}),
-					}),
-					Ok(s) => {
-						let update = SessionWindowUpdate::Advanced {
-							prev_window_start: old_window_start,
-							prev_window_end: old_window_end,
-							new_window_start: window_start,
-							new_window_end: session_index,
-						};
-
-						let outdated =
-							std::cmp::min(overlap_start as usize, self.session_info.len());
-						self.session_info.drain(..outdated);
-						self.session_info.extend(s);
-						// we need to account for this case:
-						// window_start ................................... session_index
-						//              old_window_start ........... latest
-						let new_earliest = std::cmp::max(window_start, old_window_start);
-						self.earliest_session = Some(new_earliest);
-
-						Ok(update)
-					},
-				}
+		let old_window_end = latest;
+
+		let window_start = session_index.saturating_sub(self.window_size.get() - 1);
+
+		// keep some of the old window, if applicable.
+		let overlap_start = window_start.saturating_sub(old_window_start);
+
+		let fresh_start = if latest < window_start { window_start } else { latest + 1 };
+
+		match load_all_sessions(ctx, block_hash, fresh_start, session_index).await {
+			Err(kind) => Err(SessionsUnavailable {
+				kind,
+				info: Some(SessionsUnavailableInfo {
+					window_start: fresh_start,
+					window_end: session_index,
+					block_hash,
+				}),
+			}),
+			Ok(s) => {
+				let update = SessionWindowUpdate::Advanced {
+					prev_window_start: old_window_start,
+					prev_window_end: old_window_end,
+					new_window_start: window_start,
+					new_window_end: session_index,
+				};
+
+				let outdated = std::cmp::min(overlap_start as usize, self.session_info.len());
+				self.session_info.drain(..outdated);
+				self.session_info.extend(s);
+				// we need to account for this case:
+				// window_start ................................... session_index
+				//              old_window_start ........... latest
+				let new_earliest = std::cmp::max(window_start, old_window_start);
+				self.earliest_session = new_earliest;
+
+				Ok(update)
 			},
 		}
 	}
 }
 
+async fn get_session_index_for_head(
+	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
+	block_hash: Hash,
+) -> Result<SessionIndex, SessionsUnavailable> {
+	let (s_tx, s_rx) = oneshot::channel();
+
+	// We're requesting the session index of a child to populate the cache in advance.
+	ctx.send_message(RuntimeApiMessage::Request(
+		block_hash,
+		RuntimeApiRequest::SessionIndexForChild(s_tx),
+	))
+	.await;
+
+	match s_rx.await {
+		Ok(Ok(s)) => Ok(s),
+		Ok(Err(e)) =>
+			return Err(SessionsUnavailable {
+				kind: SessionsUnavailableKind::RuntimeApi(e),
+				info: None,
+			}),
+		Err(e) =>
+			return Err(SessionsUnavailable {
+				kind: SessionsUnavailableKind::RuntimeApiUnavailable(e),
+				info: None,
+			}),
+	}
+}
+
 async fn load_all_sessions(
 	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
 	block_hash: Hash,
@@ -289,7 +274,7 @@ mod tests {
 	use polkadot_primitives::v1::Header;
 	use sp_core::testing::TaskExecutor;
 
-	const TEST_WINDOW_SIZE: SessionIndex = 6;
+	pub const TEST_WINDOW_SIZE: SessionWindowSize = new_session_window_size!(6);
 
 	fn dummy_session_info(index: SessionIndex) -> SessionInfo {
 		SessionInfo {
@@ -309,7 +294,7 @@ mod tests {
 	fn cache_session_info_test(
 		expected_start_session: SessionIndex,
 		session: SessionIndex,
-		mut window: RollingSessionWindow,
+		window: Option<RollingSessionWindow>,
 		expect_requests_from: SessionIndex,
 	) {
 		let header = Header {
@@ -328,9 +313,15 @@ mod tests {
 
 		let test_fut = {
 			Box::pin(async move {
-				window.cache_session_info_for_head(&mut ctx, hash).await.unwrap();
-
-				assert_eq!(window.earliest_session, Some(expected_start_session));
+				let window = match window {
+					None =>
+						RollingSessionWindow::new(&mut ctx, TEST_WINDOW_SIZE, hash).await.unwrap(),
+					Some(mut window) => {
+						window.cache_session_info_for_head(&mut ctx, hash).await.unwrap();
+						window
+					},
+				};
+				assert_eq!(window.earliest_session, expected_start_session);
 				assert_eq!(
 					window.session_info,
 					(expected_start_session..=session).map(dummy_session_info).collect::<Vec<_>>(),
@@ -370,34 +361,34 @@ mod tests {
 
 	#[test]
 	fn cache_session_info_first_early() {
-		cache_session_info_test(0, 1, RollingSessionWindow::new(TEST_WINDOW_SIZE), 0);
+		cache_session_info_test(0, 1, None, 0);
 	}
 
 	#[test]
 	fn cache_session_info_does_not_underflow() {
 		let window = RollingSessionWindow {
-			earliest_session: Some(1),
+			earliest_session: 1,
 			session_info: vec![dummy_session_info(1)],
 			window_size: TEST_WINDOW_SIZE,
 		};
 
-		cache_session_info_test(1, 2, window, 2);
+		cache_session_info_test(1, 2, Some(window), 2);
 	}
 
 	#[test]
 	fn cache_session_info_first_late() {
 		cache_session_info_test(
-			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE - 1),
+			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1),
 			100,
-			RollingSessionWindow::new(TEST_WINDOW_SIZE),
-			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE - 1),
+			None,
+			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1),
 		);
 	}
 
 	#[test]
 	fn cache_session_info_jump() {
 		let window = RollingSessionWindow {
-			earliest_session: Some(50),
+			earliest_session: 50,
 			session_info: vec![
 				dummy_session_info(50),
 				dummy_session_info(51),
@@ -407,43 +398,43 @@ mod tests {
 		};
 
 		cache_session_info_test(
-			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE - 1),
+			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1),
 			100,
-			window,
-			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE - 1),
+			Some(window),
+			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1),
 		);
 	}
 
 	#[test]
 	fn cache_session_info_roll_full() {
-		let start = 99 - (TEST_WINDOW_SIZE - 1);
+		let start = 99 - (TEST_WINDOW_SIZE.get() - 1);
 		let window = RollingSessionWindow {
-			earliest_session: Some(start),
+			earliest_session: start,
 			session_info: (start..=99).map(dummy_session_info).collect(),
 			window_size: TEST_WINDOW_SIZE,
 		};
 
 		cache_session_info_test(
-			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE - 1),
+			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1),
 			100,
-			window,
+			Some(window),
 			100, // should only make one request.
 		);
 	}
 
 	#[test]
 	fn cache_session_info_roll_many_full() {
-		let start = 97 - (TEST_WINDOW_SIZE - 1);
+		let start = 97 - (TEST_WINDOW_SIZE.get() - 1);
 		let window = RollingSessionWindow {
-			earliest_session: Some(start),
+			earliest_session: start,
 			session_info: (start..=97).map(dummy_session_info).collect(),
 			window_size: TEST_WINDOW_SIZE,
 		};
 
 		cache_session_info_test(
-			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE - 1),
+			(100 as SessionIndex).saturating_sub(TEST_WINDOW_SIZE.get() - 1),
 			100,
-			window,
+			Some(window),
 			98,
 		);
 	}
@@ -452,13 +443,16 @@ mod tests {
 	fn cache_session_info_roll_early() {
 		let start = 0;
 		let window = RollingSessionWindow {
-			earliest_session: Some(start),
+			earliest_session: start,
 			session_info: (0..=1).map(dummy_session_info).collect(),
 			window_size: TEST_WINDOW_SIZE,
 		};
 
 		cache_session_info_test(
-			0, 2, window, 2, // should only make one request.
+			0,
+			2,
+			Some(window),
+			2, // should only make one request.
 		);
 	}
 
@@ -466,18 +460,18 @@ mod tests {
 	fn cache_session_info_roll_many_early() {
 		let start = 0;
 		let window = RollingSessionWindow {
-			earliest_session: Some(start),
+			earliest_session: start,
 			session_info: (0..=1).map(dummy_session_info).collect(),
 			window_size: TEST_WINDOW_SIZE,
 		};
 
-		cache_session_info_test(0, 3, window, 2);
+		cache_session_info_test(0, 3, Some(window), 2);
 	}
 
 	#[test]
 	fn any_session_unavailable_for_caching_means_no_change() {
 		let session: SessionIndex = 6;
-		let start_session = session.saturating_sub(TEST_WINDOW_SIZE - 1);
+		let start_session = session.saturating_sub(TEST_WINDOW_SIZE.get() - 1);
 
 		let header = Header {
 			digest: Default::default(),
@@ -490,13 +484,11 @@ mod tests {
 		let pool = TaskExecutor::new();
 		let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
 
-		let mut window = RollingSessionWindow::new(TEST_WINDOW_SIZE);
 		let hash = header.hash();
 
 		let test_fut = {
 			Box::pin(async move {
-				let res = window.cache_session_info_for_head(&mut ctx, hash).await;
-
+				let res = RollingSessionWindow::new(&mut ctx, TEST_WINDOW_SIZE, hash).await;
 				assert!(res.is_err());
 			})
 		};
@@ -551,14 +543,14 @@ mod tests {
 		let pool = TaskExecutor::new();
 		let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
 
-		let mut window = RollingSessionWindow::new(TEST_WINDOW_SIZE);
 		let hash = header.hash();
 
 		let test_fut = {
 			Box::pin(async move {
-				window.cache_session_info_for_head(&mut ctx, hash).await.unwrap();
+				let window =
+					RollingSessionWindow::new(&mut ctx, TEST_WINDOW_SIZE, hash).await.unwrap();
 
-				assert_eq!(window.earliest_session, Some(session));
+				assert_eq!(window.earliest_session, session);
 				assert_eq!(window.session_info, vec![dummy_session_info(session)]);
 			})
 		};
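The window-advance logic above reduces to a small piece of index arithmetic. A standalone sketch of just that computation, with plain `u32` session indices and no subsystem context (names are illustrative):

```rust
type SessionIndex = u32;

/// Returns `(outdated_entries, first_session_to_fetch, new_earliest_session)`
/// for an existing window `[old_start, latest]` advancing to `new_session`.
fn advance_window(
	old_start: SessionIndex,
	latest: SessionIndex,
	new_session: SessionIndex,
	window_size: SessionIndex,
) -> (usize, SessionIndex, SessionIndex) {
	let window_start = new_session.saturating_sub(window_size - 1);
	// Cached entries older than the new window start are dropped.
	let outdated = window_start.saturating_sub(old_start) as usize;
	// Only sessions not already cached need to be fetched.
	let fresh_start = if latest < window_start { window_start } else { latest + 1 };
	// The window never moves backwards.
	let new_earliest = std::cmp::max(window_start, old_start);
	(outdated, fresh_start, new_earliest)
}

fn main() {
	// Window of 6, cached sessions 94..=99, new head in session 100:
	// drop one entry (94), fetch only session 100, window becomes 95..=100.
	assert_eq!(advance_window(94, 99, 100, 6), (1, 100, 95));
}
```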
diff --git a/polkadot/node/subsystem-util/src/runtime/error.rs b/polkadot/node/subsystem-util/src/runtime/error.rs
index af61438f5ed..4cf169b4854 100644
--- a/polkadot/node/subsystem-util/src/runtime/error.rs
+++ b/polkadot/node/subsystem-util/src/runtime/error.rs
@@ -48,11 +48,11 @@ pub enum Fatal {
 pub enum NonFatal {
 	/// Some request to the runtime failed.
 	/// For example if we prune a block we're requesting info about.
-	#[error("Runtime API error")]
+	#[error("Runtime API error {0}")]
 	RuntimeRequest(RuntimeApiError),
 
 	/// We tried fetching a session info which was not available.
-	#[error("There was no session with the given index")]
+	#[error("There was no session with the given index {0}")]
 	NoSuchSession(SessionIndex),
 }
 
diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs
index f086d31c661..1e3e898f9eb 100644
--- a/polkadot/node/subsystem-util/src/runtime/mod.rs
+++ b/polkadot/node/subsystem-util/src/runtime/mod.rs
@@ -27,13 +27,14 @@ use sp_keystore::{CryptoStore, SyncCryptoStorePtr};
 
 use polkadot_node_subsystem::{SubsystemContext, SubsystemSender};
 use polkadot_primitives::v1::{
-	CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, OccupiedCore, SessionIndex,
-	SessionInfo, Signed, SigningContext, UncheckedSigned, ValidatorId, ValidatorIndex,
+	CandidateEvent, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, OccupiedCore,
+	SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode,
+	ValidationCodeHash, ValidatorId, ValidatorIndex,
 };
 
 use crate::{
-	request_availability_cores, request_session_index_for_child, request_session_info,
-	request_validator_groups,
+	request_availability_cores, request_candidate_events, request_session_index_for_child,
+	request_session_info, request_validation_code_by_hash, request_validator_groups,
 };
 
 /// Errors that can happen on runtime fetches.
@@ -300,3 +301,27 @@ where
 		recv_runtime(request_validator_groups(relay_parent, ctx.sender()).await).await?;
 	Ok(info)
 }
+
+/// Get `CandidateEvent`s for the given `relay_parent`.
+pub async fn get_candidate_events<Sender>(
+	sender: &mut Sender,
+	relay_parent: Hash,
+) -> Result<Vec<CandidateEvent>>
+where
+	Sender: SubsystemSender,
+{
+	recv_runtime(request_candidate_events(relay_parent, sender).await).await
+}
+
+/// Fetch `ValidationCode` by hash from the runtime.
+pub async fn get_validation_code_by_hash<Sender>(
+	sender: &mut Sender,
+	relay_parent: Hash,
+	validation_code_hash: ValidationCodeHash,
+) -> Result<Option<ValidationCode>>
+where
+	Sender: SubsystemSender,
+{
+	recv_runtime(request_validation_code_by_hash(relay_parent, validation_code_hash, sender).await)
+		.await
+}
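A hypothetical caller of `get_candidate_events`, in the spirit of the dispute coordinator's new ordering provider; the surrounding function, the `Result` import path, and the filtering are illustrative, not part of this patch:

```rust
use polkadot_node_subsystem::SubsystemSender;
use polkadot_primitives::v1::{CandidateEvent, CandidateReceipt, Hash};

use crate::runtime::{get_candidate_events, Result};

/// Collect receipts of candidates included at `relay_parent`.
async fn included_candidates<Sender: SubsystemSender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<Vec<CandidateReceipt>> {
	let events = get_candidate_events(sender, relay_parent).await?;
	Ok(events
		.into_iter()
		.filter_map(|e| match e {
			// Only inclusion events matter for dispute ordering.
			CandidateEvent::CandidateIncluded(receipt, ..) => Some(receipt),
			_ => None,
		})
		.collect())
}
```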
diff --git a/polkadot/primitives/src/v1/mod.rs b/polkadot/primitives/src/v1/mod.rs
index cac440532a5..38ade2fe1a5 100644
--- a/polkadot/primitives/src/v1/mod.rs
+++ b/polkadot/primitives/src/v1/mod.rs
@@ -1277,6 +1277,17 @@ impl DisputeStatement {
 			DisputeStatement::Invalid(_) => true,
 		}
 	}
+
+	/// Statement is a backing statement.
+	pub fn is_backing(&self) -> bool {
+		match *self {
+			Self::Valid(ValidDisputeStatementKind::BackingSeconded(_)) |
+			Self::Valid(ValidDisputeStatementKind::BackingValid(_)) => true,
+			Self::Valid(ValidDisputeStatementKind::Explicit) |
+			Self::Valid(ValidDisputeStatementKind::ApprovalChecking) |
+			Self::Invalid(_) => false,
+		}
+	}
 }
 
 /// Different kinds of statements of validity on a candidate.
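For context, `is_backing` exists so vote importers can treat backing statements specially; this patch stops counting them toward spam slots. A hypothetical sketch of that use (the `SpamSlots` type here is a simplified stand-in for the one in `dispute-coordinator/src/real/spam_slots.rs`, and `MAX_SPAM_VOTES` is an assumed bound):

```rust
use std::collections::HashMap;

use polkadot_primitives::v1::{DisputeStatement, SessionIndex, ValidatorIndex};

const MAX_SPAM_VOTES: u32 = 50; // illustrative bound only

#[derive(Default)]
struct SpamSlots(HashMap<(SessionIndex, u32), u32>);

impl SpamSlots {
	/// Try to occupy a spam slot; `false` means the import should be rejected.
	fn add_unconfirmed(&mut self, session: SessionIndex, validator: ValidatorIndex) -> bool {
		let count = self.0.entry((session, validator.0)).or_insert(0);
		if *count >= MAX_SPAM_VOTES {
			return false
		}
		*count += 1;
		true
	}
}

/// Whether a vote may be imported with respect to spam protection.
fn may_import(
	slots: &mut SpamSlots,
	statement: &DisputeStatement,
	session: SessionIndex,
	validator: ValidatorIndex,
) -> bool {
	// Backing statements are already gated by the backing protocol,
	// so they are exempt from spam accounting.
	statement.is_backing() || slots.add_unconfirmed(session, validator)
}
```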
diff --git a/polkadot/roadmap/implementers-guide/src/SUMMARY.md b/polkadot/roadmap/implementers-guide/src/SUMMARY.md
index 298b99d928f..7d3d9138a79 100644
--- a/polkadot/roadmap/implementers-guide/src/SUMMARY.md
+++ b/polkadot/roadmap/implementers-guide/src/SUMMARY.md
@@ -54,7 +54,6 @@
     - [Approval Distribution](node/approval/approval-distribution.md)
   - [Disputes Subsystems](node/disputes/README.md)
     - [Dispute Coordinator](node/disputes/dispute-coordinator.md)
-    - [Dispute Participation](node/disputes/dispute-participation.md)
     - [Dispute Distribution](node/disputes/dispute-distribution.md)
   - [Utility Subsystems](node/utility/README.md)
     - [Availability Store](node/utility/availability-store.md)
diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/README.md b/polkadot/roadmap/implementers-guide/src/node/approval/README.md
index ac636853084..1f65173e16b 100644
--- a/polkadot/roadmap/implementers-guide/src/node/approval/README.md
+++ b/polkadot/roadmap/implementers-guide/src/node/approval/README.md
@@ -4,4 +4,4 @@ The approval subsystems implement the node-side of the [Approval Protocol](../..
 
 We make a divide between the [assignment/voting logic](approval-voting.md) and the [distribution logic](approval-distribution.md) that distributes assignment certifications and approval votes. The logic in the assignment and voting also informs the GRANDPA voting rule on how to vote.
 
-These subsystems are intended to flag issues and begin [participating in live disputes](../disputes/dispute-participation.md). Dispute subsystems also track all observed votes (backing, approval, and dispute-specific) by all validators on all candidates.
+These subsystems are intended to flag issues and begin participating in live disputes. Dispute subsystems also track all observed votes (backing, approval, and dispute-specific) by all validators on all candidates.
diff --git a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md
index c8bf0153f49..24cc224d152 100644
--- a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md
+++ b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md
@@ -2,7 +2,7 @@
 
 This is the central subsystem of the node-side components which participate in disputes. This subsystem wraps a database which tracks all statements observed by all validators over some window of sessions. Votes older than this session window are pruned.
 
-This subsystem will be the point which produce dispute votes, either positive or negative, based on locally-observed validation results as well as a sink for votes received by other subsystems. When importing a dispute vote from another node, this will trigger the [dispute participation](dispute-participation.md) subsystem to recover and validate the block and call back to this subsystem.
+This subsystem will be the point which produces dispute votes, either positive or negative, based on locally-observed validation results, as well as a sink for votes received from other subsystems. When importing a dispute vote from another node, this will trigger participation in the dispute.
 
 ## Database Schema
 
@@ -56,11 +56,10 @@ Input: [`DisputeCoordinatorMessage`][DisputeCoordinatorMessage]
 
 Output:
   - [`RuntimeApiMessage`][RuntimeApiMessage]
-  - [`DisputeParticipationMessage`][DisputeParticipationMessage]
 
 ## Functionality
 
-This assumes a constant `DISPUTE_WINDOW: SessionIndex`. This should correspond to at least 1 day.
+This assumes a constant `DISPUTE_WINDOW: SessionWindowSize`. This should correspond to at least 1 day.
 
 Ephemeral in-memory state:
 
@@ -75,8 +74,7 @@ struct State {
 
 Check DB for recorded votes for non-concluded disputes we have not yet
 recorded a local statement for.
-For all of those send `DisputeParticipationMessage::Participate` message to
-dispute participation subsystem.
+For all of those, initiate dispute participation (sketched below).
 
 ### On `OverseerSignal::ActiveLeavesUpdate`
 
@@ -171,4 +169,3 @@ Do nothing.
 [DisputeStatement]: ../../types/disputes.md#disputestatement
 [DisputeCoordinatorMessage]: ../../types/overseer-protocol.md#dispute-coordinator-message
 [RuntimeApiMessage]: ../../types/overseer-protocol.md#runtime-api-message
-[DisputeParticipationMessage]: ../../types/overseer-protocol.md#dispute-participation-message
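A schematic sketch of that startup pass (the types here are illustrative stand-ins, not the coordinator's actual ones):

```rust
struct Dispute {
	concluded: bool,
	has_local_statement: bool,
}

/// Return the indices of disputes that are still open and for which we owe
/// a vote; participation is then initiated for each of them, in order.
fn unconcluded_without_local_statement(disputes: &[Dispute]) -> Vec<usize> {
	disputes
		.iter()
		.enumerate()
		.filter(|(_, d)| !d.concluded && !d.has_local_statement)
		.map(|(i, _)| i)
		.collect()
}
```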
diff --git a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md
index e2a52bdeb82..eb571420fb7 100644
--- a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md
+++ b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md
@@ -21,9 +21,9 @@ This design should result in a protocol that is:
 
 ### Output
 
-- [`DisputeCoordinatorMessage::ActiveDisputes`][DisputeParticipationMessage]
-- [`DisputeCoordinatorMessage::ImportStatements`][DisputeParticipationMessage]
-- [`DisputeCoordinatorMessage::QueryCandidateVotes`][DisputeParticipationMessage]
+- [`DisputeCoordinatorMessage::ActiveDisputes`][DisputeCoordinatorMessage]
+- [`DisputeCoordinatorMessage::ImportStatements`][DisputeCoordinatorMessage]
+- [`DisputeCoordinatorMessage::QueryCandidateVotes`][DisputeCoordinatorMessage]
 - [`RuntimeApiMessage`][RuntimeApiMessage]
 
 ### Wire format
@@ -357,4 +357,3 @@ no real harm done: There was no serious attack to begin with.
 
 [DisputeDistributionMessage]: ../../types/overseer-protocol.md#dispute-distribution-message
 [RuntimeApiMessage]: ../../types/overseer-protocol.md#runtime-api-message
-[DisputeParticipationMessage]: ../../types/overseer-protocol.md#dispute-participation-message
diff --git a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-participation.md b/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-participation.md
deleted file mode 100644
index fc0517fa4e1..00000000000
--- a/polkadot/roadmap/implementers-guide/src/node/disputes/dispute-participation.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Dispute Participation
-
-This subsystem is responsible for actually participating in disputes: when notified of a dispute, we need to recover the candidate data, validate the candidate, and cast our vote in the dispute.
-
-Fortunately, most of that work is handled by other subsystems; this subsystem is just a small glue component for tying other subsystems together and issuing statements based on their validity.
-
-## Protocol
-
-Input: [`DisputeParticipationMessage`][DisputeParticipationMessage]
-
-Output:
-  - [`RuntimeApiMessage`][RuntimeApiMessage]
-  - [`CandidateValidationMessage`][CandidateValidationMessage]
-  - [`AvailabilityRecoveryMessage`][AvailabilityRecoveryMessage]
-  - [`AvailabilityStoreMessage`][AvailabilityStoreMessage]
-  - [`ChainApiMessage`][ChainApiMessage]
-
-## Functionality
-
-In-memory state:
-
-```rust
-struct State {
-    recent_block_hash: Option<(BlockNumber, Hash)>
-}
-```
-
-### On `OverseerSignal::ActiveLeavesUpdate`
-
-Update `recent_block` in in-memory state according to the highest observed active leaf.
-
-### On `OverseerSignal::BlockFinalized`
-
-Do nothing.
-
-### On `OverseerSignal::Conclude`
-
-Conclude.
-
-### On `DisputeParticipationMessage::Participate`
-
-* Decompose into parts: `{ candidate_hash, candidate_receipt, session, voted_indices }`
-* Issue an [`AvailabilityRecoveryMessage::RecoverAvailableData`][AvailabilityRecoveryMessage]
-* Report back availability result to the `AvailabilityRecoveryMessage` sender
-  via the `report_availability` oneshot.
-* If the result is `Unavailable`, return.
-* If the result is `Invalid`, [cast invalid votes](#cast-votes) and return.
-* If the data is recovered, dispatch a [`RuntimeApiMessage::ValidationCodeByHash`][RuntimeApiMessage] with the parameters `(candidate_receipt.descriptor.validation_code_hash)` at `state.recent_block.hash`.
-* Dispatch a [`AvailabilityStoreMessage::StoreAvailableData`][AvailabilityStoreMessage] with the data.
-* If the code is not fetched from the chain, return. This should be impossible with correct relay chain configuration, at least if chain synchronization is working correctly.
-* Dispatch a [`CandidateValidationMessage::ValidateFromExhaustive`][CandidateValidationMessage] with the available data and the validation code and `APPROVAL_EXECUTION_TIMEOUT` as the timeout parameter.
-* If the validation result is `Invalid`, [cast invalid votes](#cast-votes) and return.
-* If the validation fails, [cast invalid votes](#cast-votes) and return.
-* If the validation succeeds, compute the `CandidateCommitments` based on the validation result and compare against the candidate receipt's `commitments_hash`. If they match, [cast valid votes](#cast-votes) and if not, [cast invalid votes](#cast-votes).
-
-### Cast Votes
-
-This requires the parameters `{ candidate_receipt, candidate_hash, session, voted_indices }` as well as a choice of either `Valid` or `Invalid`.
-
-Invoke [`DisputeCoordinatorMessage::IssueLocalStatement`][DisputeCoordinatorMessage] with `is_valid` according to the parameterization,.
-
-[RuntimeApiMessage]: ../../types/overseer-protocol.md#runtime-api-message
-[DisputeParticipationMessage]: ../../types/overseer-protocol.md#dispute-participation-message
-[DisputeCoordinatorMessage]: ../../types/overseer-protocol.md#dispute-coordinator-message
-[CandidateValidationMessage]: ../../types/overseer-protocol.md#candidate-validation-message
-[AvailabilityRecoveryMessage]: ../../types/overseer-protocol.md#availability-recovery-message
-[ChainApiMessage]: ../../types/overseer-protocol.md#chain-api-message
-[AvailabilityStoreMessage]: ../../types/overseer-protocol.md#availability-store-message
diff --git a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
index 0e06d30753a..61a87469783 100644
--- a/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
+++ b/polkadot/roadmap/implementers-guide/src/types/overseer-protocol.md
@@ -69,7 +69,6 @@ enum AllMessages {
     ApprovalDistribution(ApprovalDistributionMessage),
     GossipSupport(GossipSupportMessage),
     DisputeCoordinator(DisputeCoordinatorMessage),
-    DisputeParticipation(DisputeParticipationMessage),
     ChainSelection(ChainSelectionMessage),
 }
 ```
@@ -473,30 +472,6 @@ pub enum ImportStatementsResult {
 }
 ```
 
-## Dispute Participation Message
-
-Messages received by the [Dispute Participation subsystem](../node/disputes/dispute-participation.md)
-
-This subsystem simply executes requests to evaluate a candidate.
-
-```rust
-enum DisputeParticipationMessage {
-    /// Validate a candidate for the purposes of participating in a dispute.
-    Participate {
-        /// The hash of the candidate
-        candidate_hash: CandidateHash,
-        /// The candidate receipt itself.
-        candidate_receipt: CandidateReceipt,
-        /// The session the candidate appears in.
-        session: SessionIndex,
-        /// The number of validators in the session.
-        n_validators: u32,
-        /// Give immediate feedback on whether the candidate was available or
-        /// not.
-        report_availability: oneshot::Sender<bool>,
-    }
-}
-```
 
 ## Dispute Distribution Message
 
-- 
GitLab