diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs
index df775f6c9ec07d00cba1c7aeee60510691ef210a..4c191c7d8a1a0882905fd77ec1ab0af90b213413 100644
--- a/cumulus/client/consensus/aura/src/collators/mod.rs
+++ b/cumulus/client/consensus/aura/src/collators/mod.rs
@@ -22,19 +22,18 @@
 
 use crate::collator::SlotClaim;
 use codec::Codec;
-use cumulus_client_consensus_common::{
-	self as consensus_common, load_abridged_host_configuration, ParentSearchParams,
-};
+use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams};
 use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot};
 use cumulus_primitives_core::{relay_chain::Hash as ParaHash, BlockT, ClaimQueueOffset};
 use cumulus_relay_chain_interface::RelayChainInterface;
+use polkadot_node_subsystem::messages::RuntimeApiRequest;
 use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot;
 use polkadot_primitives::{
-	AsyncBackingParams, CoreIndex, Hash as RelayHash, Id as ParaId, OccupiedCoreAssumption,
-	ValidationCodeHash,
+	CoreIndex, Hash as RelayHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash,
+	DEFAULT_SCHEDULING_LOOKAHEAD,
 };
 use sc_consensus_aura::{standalone as aura_internal, AuraApi};
-use sp_api::{ApiExt, ProvideRuntimeApi};
+use sp_api::{ApiExt, ProvideRuntimeApi, RuntimeApiInfo};
 use sp_core::Pair;
 use sp_keystore::KeystorePtr;
 use sp_timestamp::Timestamp;
@@ -102,26 +101,43 @@ async fn check_validation_code_or_log(
 	}
 }
 
-/// Reads async backing parameters from the relay chain storage at the given relay parent.
-async fn async_backing_params(
+/// Fetch the scheduling lookahead at the given relay parent.
+async fn scheduling_lookahead(
 	relay_parent: RelayHash,
 	relay_client: &impl RelayChainInterface,
-) -> Option<AsyncBackingParams> {
-	match load_abridged_host_configuration(relay_parent, relay_client).await {
-		Ok(Some(config)) => Some(config.async_backing_params),
-		Ok(None) => {
+) -> Option<u32> {
+	let runtime_api_version = relay_client
+		.version(relay_parent)
+		.await
+		.map_err(|e| {
 			tracing::error!(
-				target: crate::LOG_TARGET,
-				"Active config is missing in relay chain storage",
-			);
-			None
-		},
+				target: crate::LOG_TARGET,
+				error = ?e,
+				"Failed to fetch relay chain runtime version.",
+			)
+		})
+		.ok()?;
+
+	let parachain_host_runtime_api_version = runtime_api_version
+		.api_version(
+			&<dyn polkadot_primitives::runtime_api::ParachainHost<polkadot_primitives::Block>>::ID,
+		)
+		.unwrap_or_default();
+
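+	// Relay chain runtimes that predate the `scheduling_lookahead` runtime API
+	// can't serve this request; return `None` so callers can fall back to a
+	// sensible default.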
+	if parachain_host_runtime_api_version <
+		RuntimeApiRequest::SCHEDULING_LOOKAHEAD_RUNTIME_REQUIREMENT
+	{
+		return None
+	}
+
+	match relay_client.scheduling_lookahead(relay_parent).await {
+		Ok(scheduling_lookahead) => Some(scheduling_lookahead),
 		Err(err) => {
 			tracing::error!(
 				target: crate::LOG_TARGET,
 				?err,
 				?relay_parent,
-				"Failed to read active config from relay chain client",
+				"Failed to fetch scheduling lookahead from relay chain",
 			);
 			None
 		},
@@ -217,9 +233,10 @@ where
 	let parent_search_params = ParentSearchParams {
 		relay_parent,
 		para_id,
-		ancestry_lookback: crate::collators::async_backing_params(relay_parent, relay_client)
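+		// The scheduling lookahead counts the current relay parent, so the ancestry
+		// lookback is one less than the fetched (or default) lookahead value.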
+		ancestry_lookback: scheduling_lookahead(relay_parent, relay_client)
 			.await
-			.map_or(0, |params| params.allowed_ancestry_len as usize),
+			.unwrap_or(DEFAULT_SCHEDULING_LOOKAHEAD)
+			.saturating_sub(1) as usize,
 		max_depth: PARENT_SEARCH_DEPTH,
 		ignore_alternative_branches: true,
 	};
diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs
index 79e620db3bfa0c82860fa5fcce28a8c5fea5b975..2eff183fc47375e7cff8633798c2d33384b2060e 100644
--- a/cumulus/client/consensus/common/src/tests.rs
+++ b/cumulus/client/consensus/common/src/tests.rs
@@ -284,6 +284,10 @@ impl RelayChainInterface for Relaychain {
 	) -> RelayChainResult<Vec<u8>> {
 		unimplemented!("Not needed for test")
 	}
+
+	async fn scheduling_lookahead(&self, _: PHash) -> RelayChainResult<u32> {
+		unimplemented!("Not needed for test")
+	}
 }
 
 fn sproof_with_best_parent(client: &Client) -> RelayStateSproofBuilder {
diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs
index cccb710bf18f1122d8675ee0bfca23e2075b3d47..3bdcdaae4ef67e679b1655149383e78c8ec03c36 100644
--- a/cumulus/client/network/src/tests.rs
+++ b/cumulus/client/network/src/tests.rs
@@ -347,6 +347,10 @@ impl RelayChainInterface for DummyRelayChainInterface {
 	) -> RelayChainResult<Vec<u8>> {
 		unimplemented!("Not needed for test")
 	}
+
+	async fn scheduling_lookahead(&self, _: PHash) -> RelayChainResult<u32> {
+		unimplemented!("Not needed for test")
+	}
 }
 
 fn make_validator_and_api() -> (
diff --git a/cumulus/client/pov-recovery/src/tests.rs b/cumulus/client/pov-recovery/src/tests.rs
index 91b462e06bf87bf9fb264e638daa77547b8ce660..be890d01dd96721402e00d5eaa07fd763e03079c 100644
--- a/cumulus/client/pov-recovery/src/tests.rs
+++ b/cumulus/client/pov-recovery/src/tests.rs
@@ -503,6 +503,10 @@ impl RelayChainInterface for Relaychain {
 	) -> RelayChainResult<Vec<u8>> {
 		unimplemented!("Not needed for test")
 	}
+
+	async fn scheduling_lookahead(&self, _: PHash) -> RelayChainResult<u32> {
+		unimplemented!("Not needed for test")
+	}
 }
 
 fn make_candidate_chain(candidate_number_range: Range<u32>) -> Vec<CommittedCandidateReceipt> {
diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
index f29e7f3ed7c7c5eb55cbe80a2c428c112e58cd60..e5daf8ee7b5878b5dbae3401b3688f916434ebbe 100644
--- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
+++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
@@ -316,6 +316,10 @@ impl RelayChainInterface for RelayChainInProcessInterface {
 	) -> RelayChainResult<BTreeMap<CoreIndex, VecDeque<ParaId>>> {
 		Ok(self.full_client.runtime_api().claim_queue(hash)?)
 	}
+
+	async fn scheduling_lookahead(&self, hash: PHash) -> RelayChainResult<u32> {
+		Ok(self.full_client.runtime_api().scheduling_lookahead(hash)?)
+	}
 }
 
 pub enum BlockCheckStatus {
diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs
index 4a49eada292ac83ff489e4a8f12d2aa60e7ef300..f1d5e013ba6a21a9f267ef05ea101fddaa973003 100644
--- a/cumulus/client/relay-chain-interface/src/lib.rs
+++ b/cumulus/client/relay-chain-interface/src/lib.rs
@@ -244,6 +244,9 @@ pub trait RelayChainInterface: Send + Sync {
 		&self,
 		relay_parent: PHash,
 	) -> RelayChainResult<BTreeMap<CoreIndex, VecDeque<ParaId>>>;
+
+	/// Fetch the scheduling lookahead value at the given relay parent.
+	async fn scheduling_lookahead(&self, relay_parent: PHash) -> RelayChainResult<u32>;
 }
 
 #[async_trait]
@@ -398,6 +401,10 @@ where
 	) -> RelayChainResult<BTreeMap<CoreIndex, VecDeque<ParaId>>> {
 		(**self).claim_queue(relay_parent).await
 	}
+
+	async fn scheduling_lookahead(&self, relay_parent: PHash) -> RelayChainResult<u32> {
+		(**self).scheduling_lookahead(relay_parent).await
+	}
 }
 
 /// Helper function to call an arbitrary runtime API using a `RelayChainInterface` client.
diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
index 862cf6af97956802ce4dd871094db38c57851b74..cfd5bd951333dc5436d90ad9c83033a75628debe 100644
--- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
+++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs
@@ -464,6 +464,10 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient {
 	) -> Result<Option<Constraints>, ApiError> {
 		Ok(self.rpc_client.parachain_host_backing_constraints(at, para_id).await?)
 	}
+
+	async fn scheduling_lookahead(&self, at: Hash) -> Result<u32, sp_api::ApiError> {
+		Ok(self.rpc_client.parachain_host_scheduling_lookahead(at).await?)
+	}
 }
 
 #[async_trait::async_trait]
diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs
index 0e2f6c054c403607754e494a3ce6b595ad2bcd6b..a895d8f3e5f26239eafc40db45d85d5e86a807d7 100644
--- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs
+++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs
@@ -282,4 +282,8 @@ impl RelayChainInterface for RelayChainRpcInterface {
 	> {
 		self.rpc_client.parachain_host_claim_queue(relay_parent).await
 	}
+
+	async fn scheduling_lookahead(&self, relay_parent: RelayHash) -> RelayChainResult<u32> {
+		self.rpc_client.parachain_host_scheduling_lookahead(relay_parent).await
+	}
 }
diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
index 0467b7085ca020f43dd4ab8442135e443850b96a..1cd9d0c11eeddbca8b02831c89016ac869479073 100644
--- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
+++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs
@@ -706,6 +706,14 @@ impl RelayChainRpcClient {
 		.await
 	}
 
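+	/// Fetch the scheduling lookahead via the `ParachainHost_scheduling_lookahead`
+	/// runtime API.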
+	pub async fn parachain_host_scheduling_lookahead(
+		&self,
+		at: RelayHash,
+	) -> Result<u32, RelayChainError> {
+		self.call_remote_runtime_function("ParachainHost_scheduling_lookahead", at, None::<()>)
+			.await
+	}
+
 	pub async fn validation_code_hash(
 		&self,
 		at: RelayHash,
diff --git a/cumulus/zombienet/tests/0008-elastic_authoring.toml b/cumulus/zombienet/tests/0008-elastic_authoring.toml
index f2e2010a9e4582feefaebdaa355ab96b6a8f7695..516c152471b113e6c527045c9aaff1b58c8c1fed 100644
--- a/cumulus/zombienet/tests/0008-elastic_authoring.toml
+++ b/cumulus/zombienet/tests/0008-elastic_authoring.toml
@@ -1,10 +1,6 @@
 [settings]
 timeout = 1000
 
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 6
-  allowed_ancestry_len = 3
-
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
   max_validators_per_core = 1
   num_cores = 4
diff --git a/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml b/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml
index 1cf0775a2e177be85445de1f5a85c745670a76ee..b65ed77ec1ba5bd986f9505c0915b15fbd98eee7 100644
--- a/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml
+++ b/cumulus/zombienet/tests/0009-elastic_pov_recovery.toml
@@ -9,10 +9,6 @@ requests = { memory = "2G", cpu = "1" }
 limits = { memory = "4G", cpu = "2" }
 requests = { memory = "2G", cpu = "1" }
 
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 6
-  allowed_ancestry_len = 3
-
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
   max_validators_per_core = 1
   num_cores = 4
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index 2a4643031bf8788b1def68b76a916ab32a355821..24590fe0c90ea9f8a1e164efc52cd5da18dbf436 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -39,9 +39,9 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_util::{
 	self as util,
-	runtime::{prospective_parachains_mode, ClaimQueueSnapshot, ProspectiveParachainsMode},
+	runtime::{fetch_scheduling_lookahead, ClaimQueueSnapshot},
 };
-use polkadot_overseer::ActiveLeavesUpdate;
+use polkadot_overseer::{ActivatedLeaf, ActiveLeavesUpdate};
 use polkadot_parachain_primitives::primitives::ValidationResult as WasmValidationResult;
 use polkadot_primitives::{
 	executor_params::{
@@ -158,7 +158,7 @@ where
 	Sender: SubsystemSender<RuntimeApiMessage>,
 {
 	match util::runtime::fetch_claim_queue(sender, relay_parent).await {
-		Ok(maybe_cq) => maybe_cq,
+		Ok(cq) => Some(cq),
 		Err(err) => {
 			gum::warn!(
 				target: LOG_TARGET,
@@ -190,40 +190,30 @@ where
 			exec_kind,
 			response_sender,
 			..
-		} =>
-			async move {
-				let _timer = metrics.time_validate_from_exhaustive();
-				let relay_parent = candidate_receipt.descriptor.relay_parent();
-
-				let maybe_claim_queue = claim_queue(relay_parent, &mut sender).await;
-
-				let maybe_expected_session_index =
-					match util::request_session_index_for_child(relay_parent, &mut sender)
-						.await
-						.await
-					{
-						Ok(Ok(expected_session_index)) => Some(expected_session_index),
-						_ => None,
-					};
-
-				let res = validate_candidate_exhaustive(
-					maybe_expected_session_index,
-					validation_host,
-					validation_data,
-					validation_code,
-					candidate_receipt,
-					pov,
-					executor_params,
-					exec_kind,
-					&metrics,
-					maybe_claim_queue,
-				)
-				.await;
+		} => async move {
+			let _timer = metrics.time_validate_from_exhaustive();
+			let relay_parent = candidate_receipt.descriptor.relay_parent();
+
+			let maybe_claim_queue = claim_queue(relay_parent, &mut sender).await;
+
+			let res = validate_candidate_exhaustive(
+				get_session_index(&mut sender, relay_parent).await,
+				validation_host,
+				validation_data,
+				validation_code,
+				candidate_receipt,
+				pov,
+				executor_params,
+				exec_kind,
+				&metrics,
+				maybe_claim_queue,
+			)
+			.await;
 
-				metrics.on_validation_event(&res);
-				let _ = response_sender.send(res);
-			}
-			.boxed(),
+			metrics.on_validation_event(&res);
+			let _ = response_sender.send(res);
+		}
+		.boxed(),
 		CandidateValidationMessage::PreCheck {
 			relay_parent,
 			validation_code_hash,
@@ -257,7 +247,7 @@ async fn run<Context>(
 		pvf_prepare_workers_hard_max_num,
 	}: Config,
 ) -> SubsystemResult<()> {
-	let (validation_host, task) = polkadot_node_core_pvf::start(
+	let (mut validation_host, task) = polkadot_node_core_pvf::start(
 		polkadot_node_core_pvf::Config::new(
 			artifacts_cache_path,
 			node_version,
@@ -282,8 +272,13 @@ async fn run<Context>(
 				comm = ctx.recv().fuse() => {
 					match comm {
 						Ok(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) => {
-							update_active_leaves(ctx.sender(), validation_host.clone(), update.clone()).await;
-							maybe_prepare_validation(ctx.sender(), keystore.clone(), validation_host.clone(), update, &mut prepare_state).await;
+							handle_active_leaves_update(
+								ctx.sender(),
+								keystore.clone(),
+								&mut validation_host,
+								update,
+								&mut prepare_state,
+							).await
 						},
 						Ok(FromOrchestra::Signal(OverseerSignal::BlockFinalized(..))) => {},
 						Ok(FromOrchestra::Signal(OverseerSignal::Conclude)) => return Ok(()),
@@ -343,17 +338,46 @@ impl Default for PrepareValidationState {
 	}
 }
 
-async fn maybe_prepare_validation<Sender>(
+async fn handle_active_leaves_update<Sender>(
 	sender: &mut Sender,
 	keystore: KeystorePtr,
-	validation_backend: impl ValidationBackend,
+	validation_host: &mut impl ValidationBackend,
 	update: ActiveLeavesUpdate,
+	prepare_state: &mut PrepareValidationState,
+) where
+	Sender: SubsystemSender<ChainApiMessage> + SubsystemSender<RuntimeApiMessage>,
+{
+	let maybe_session_index = update_active_leaves(sender, validation_host, update.clone()).await;
+
+	if let Some(activated) = update.activated {
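+		// Only treat the session as new when it's the first one observed or it
+		// strictly increased; otherwise the cached per-session state is kept.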
+		let maybe_new_session_index = match (prepare_state.session_index, maybe_session_index) {
+			(Some(existing_index), Some(new_index)) =>
+				(new_index > existing_index).then_some(new_index),
+			(None, Some(new_index)) => Some(new_index),
+			_ => None,
+		};
+		maybe_prepare_validation(
+			sender,
+			keystore.clone(),
+			validation_host,
+			activated,
+			prepare_state,
+			maybe_new_session_index,
+		)
+		.await;
+	}
+}
+
+async fn maybe_prepare_validation<Sender>(
+	sender: &mut Sender,
+	keystore: KeystorePtr,
+	validation_backend: &mut impl ValidationBackend,
+	leaf: ActivatedLeaf,
 	state: &mut PrepareValidationState,
+	new_session_index: Option<SessionIndex>,
 ) where
 	Sender: SubsystemSender<RuntimeApiMessage>,
 {
-	let Some(leaf) = update.activated else { return };
-	let new_session_index = new_session_index(sender, state.session_index, leaf.hash).await;
 	if new_session_index.is_some() {
 		state.session_index = new_session_index;
 		state.already_prepared_code_hashes.clear();
@@ -380,16 +404,11 @@ async fn maybe_prepare_validation<Sender>(
 	}
 }
 
-// Returns the new session index if it is greater than the current one.
-async fn new_session_index<Sender>(
-	sender: &mut Sender,
-	session_index: Option<SessionIndex>,
-	relay_parent: Hash,
-) -> Option<SessionIndex>
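+/// Request the session index for the child of `relay_parent`, returning `None`
+/// (and logging a warning) if the runtime API call fails.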
+async fn get_session_index<Sender>(sender: &mut Sender, relay_parent: Hash) -> Option<SessionIndex>
 where
 	Sender: SubsystemSender<RuntimeApiMessage>,
 {
-	let Ok(Ok(new_session_index)) =
+	let Ok(Ok(session_index)) =
 		util::request_session_index_for_child(relay_parent, sender).await.await
 	else {
 		gum::warn!(
@@ -400,13 +419,7 @@ where
 		return None
 	};
 
-	session_index.map_or(Some(new_session_index), |index| {
-		if new_session_index > index {
-			Some(new_session_index)
-		} else {
-			None
-		}
-	})
+	Some(session_index)
 }
 
 // Returns true if the node is an authority in the next session.
@@ -460,7 +473,7 @@ where
 // Sends PVF with unknown code hashes to the validation host returning the list of code hashes sent.
 async fn prepare_pvfs_for_backed_candidates<Sender>(
 	sender: &mut Sender,
-	mut validation_backend: impl ValidationBackend,
+	validation_backend: &mut impl ValidationBackend,
 	relay_parent: Hash,
 	already_prepared: &HashSet<ValidationCodeHash>,
 	per_block_limit: usize,
@@ -557,12 +570,21 @@ where
 
 async fn update_active_leaves<Sender>(
 	sender: &mut Sender,
-	mut validation_backend: impl ValidationBackend,
+	validation_backend: &mut impl ValidationBackend,
 	update: ActiveLeavesUpdate,
-) where
+) -> Option<SessionIndex>
+where
 	Sender: SubsystemSender<ChainApiMessage> + SubsystemSender<RuntimeApiMessage>,
 {
-	let ancestors = get_block_ancestors(sender, update.activated.as_ref().map(|x| x.hash)).await;
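+	// Resolve the activated leaf to its (hash, session index) pair; the session
+	// index is needed to fetch the scheduling lookahead for the ancestry request.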
+	let maybe_new_leaf = if let Some(activated) = &update.activated {
+		get_session_index(sender, activated.hash)
+			.await
+			.map(|index| (activated.hash, index))
+	} else {
+		None
+	};
+
+	let ancestors = get_block_ancestors(sender, maybe_new_leaf).await;
 	if let Err(err) = validation_backend.update_active_leaves(update, ancestors).await {
 		gum::warn!(
 			target: LOG_TARGET,
@@ -570,39 +592,33 @@ async fn update_active_leaves<Sender>(
 			"cannot update active leaves in validation backend",
 		);
 	};
-}
 
-async fn get_allowed_ancestry_len<Sender>(sender: &mut Sender, relay_parent: Hash) -> Option<usize>
-where
-	Sender: SubsystemSender<ChainApiMessage> + SubsystemSender<RuntimeApiMessage>,
-{
-	match prospective_parachains_mode(sender, relay_parent).await {
-		Ok(ProspectiveParachainsMode::Enabled { allowed_ancestry_len, .. }) =>
-			Some(allowed_ancestry_len),
-		res => {
-			gum::warn!(target: LOG_TARGET, ?res, "async backing is disabled");
-			None
-		},
-	}
+	maybe_new_leaf.map(|(_, session_index)| session_index)
 }
 
 async fn get_block_ancestors<Sender>(
 	sender: &mut Sender,
-	maybe_relay_parent: Option<Hash>,
+	maybe_new_leaf: Option<(Hash, SessionIndex)>,
 ) -> Vec<Hash>
 where
 	Sender: SubsystemSender<ChainApiMessage> + SubsystemSender<RuntimeApiMessage>,
 {
-	let Some(relay_parent) = maybe_relay_parent else { return vec![] };
-	let Some(allowed_ancestry_len) = get_allowed_ancestry_len(sender, relay_parent).await else {
-		return vec![]
-	};
+	let Some((relay_parent, session_index)) = maybe_new_leaf else { return vec![] };
+	let scheduling_lookahead =
+		match fetch_scheduling_lookahead(relay_parent, session_index, sender).await {
+			Ok(scheduling_lookahead) => scheduling_lookahead,
+			res => {
+				gum::warn!(target: LOG_TARGET, ?res, "Failed to request scheduling lookahead");
+				return vec![]
+			},
+		};
 
 	let (tx, rx) = oneshot::channel();
 	sender
 		.send_message(ChainApiMessage::Ancestors {
 			hash: relay_parent,
-			k: allowed_ancestry_len,
+			// Subtract 1 from the scheduling lookahead (the claim queue length), as it
+			// includes the current `relay_parent`.
+			k: scheduling_lookahead.saturating_sub(1) as usize,
 			response_channel: tx,
 		})
 		.await;
diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs
index 795d7c93f8a70ee939cf4580118efca3381f0d63..ee72daa1f86eb5988c9e8f0aa52549344621454b 100644
--- a/polkadot/node/core/candidate-validation/src/tests.rs
+++ b/polkadot/node/core/candidate-validation/src/tests.rs
@@ -26,6 +26,7 @@ use futures::executor;
 use polkadot_node_core_pvf::PrepareError;
 use polkadot_node_primitives::{BlockData, VALIDATION_CODE_BOMB_LIMIT};
 use polkadot_node_subsystem::messages::AllMessages;
+use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle};
 use polkadot_node_subsystem_util::reexports::SubsystemContext;
 use polkadot_overseer::ActivatedLeaf;
 use polkadot_primitives::{
@@ -34,7 +35,7 @@ use polkadot_primitives::{
 		MutateDescriptorV2, UMPSignal, UMP_SEPARATOR,
 	},
 	CandidateDescriptor, CoreIndex, GroupIndex, HeadData, Id as ParaId, OccupiedCoreAssumption,
-	SessionInfo, UpwardMessage, ValidatorId,
+	SessionInfo, UpwardMessage, ValidatorId, DEFAULT_SCHEDULING_LOOKAHEAD,
 };
 use polkadot_primitives_test_helpers::{
 	dummy_collator, dummy_collator_signature, dummy_hash, make_valid_candidate_descriptor,
@@ -119,10 +120,7 @@ fn correctly_checks_included_assumption() {
 	.into();
 
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
-		AllMessages,
-		_,
-	>(pool.clone());
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool.clone());
 
 	let (check_fut, check_result) = check_assumption_validation_data(
 		ctx.sender(),
@@ -194,10 +192,7 @@ fn correctly_checks_timed_out_assumption() {
 	.into();
 
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
-		AllMessages,
-		_,
-	>(pool.clone());
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool.clone());
 
 	let (check_fut, check_result) = check_assumption_validation_data(
 		ctx.sender(),
@@ -267,10 +262,7 @@ fn check_is_bad_request_if_no_validation_data() {
 	.into();
 
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
-		AllMessages,
-		_,
-	>(pool.clone());
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool.clone());
 
 	let (check_fut, check_result) = check_assumption_validation_data(
 		ctx.sender(),
@@ -324,10 +316,7 @@ fn check_is_bad_request_if_no_validation_code() {
 	.into();
 
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
-		AllMessages,
-		_,
-	>(pool.clone());
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool.clone());
 
 	let (check_fut, check_result) = check_assumption_validation_data(
 		ctx.sender(),
@@ -393,10 +382,7 @@ fn check_does_not_match() {
 	.into();
 
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
-		AllMessages,
-		_,
-	>(pool.clone());
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool.clone());
 
 	let (check_fut, check_result) = check_assumption_validation_data(
 		ctx.sender(),
@@ -1393,10 +1379,7 @@ fn candidate_validation_code_mismatch_is_invalid() {
 	let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() };
 
 	let pool = TaskExecutor::new();
-	let (_ctx, _ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
-		AllMessages,
-		_,
-	>(pool.clone());
+	let (_ctx, _ctx_handle) = make_subsystem_context::<AllMessages, _>(pool.clone());
 
 	let v = executor::block_on(validate_candidate_exhaustive(
 		Some(1),
@@ -1524,10 +1507,7 @@ fn precheck_works() {
 	let validation_code_hash = validation_code.hash();
 
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::<
-		AllMessages,
-		_,
-	>(pool.clone());
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool.clone());
 
 	let (check_fut, check_result) = precheck_pvf(
 		ctx.sender(),
@@ -1584,10 +1564,7 @@ fn precheck_properly_classifies_outcomes() {
 		let validation_code_hash = validation_code.hash();
 
 		let pool = TaskExecutor::new();
-		let (mut ctx, mut ctx_handle) =
-			polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(
-				pool.clone(),
-			);
+		let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool.clone());
 
 		let (check_fut, check_result) = precheck_pvf(
 			ctx.sender(),
@@ -1677,7 +1654,7 @@ impl ValidationBackend for MockHeadsUp {
 		_update: ActiveLeavesUpdate,
 		_ancestors: Vec<Hash>,
 	) -> Result<(), String> {
-		unreachable!()
+		Ok(())
 	}
 }
 
@@ -1754,28 +1731,51 @@ fn dummy_session_info(keys: Vec<Public>) -> SessionInfo {
 	}
 }
 
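+/// Drive the test overseer through the message exchange expected for a new active
+/// leaf: session index, scheduling lookahead and block ancestor requests.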
+async fn assert_new_active_leaf_messages(
+	recv_handle: &mut TestSubsystemContextHandle<AllMessages>,
+	expected_session_index: SessionIndex,
+) {
+	assert_matches!(
+		recv_handle.recv().await,
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
+			let _ = tx.send(Ok(expected_session_index));
+		}
+	);
+
+	let lookahead_value = DEFAULT_SCHEDULING_LOOKAHEAD;
+	assert_matches!(
+		recv_handle.recv().await,
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SchedulingLookahead(index, tx))) => {
+			assert_eq!(index, expected_session_index);
+			let _ = tx.send(Ok(lookahead_value));
+		}
+	);
+
+	assert_matches!(
+		recv_handle.recv().await,
+		AllMessages::ChainApi(ChainApiMessage::Ancestors {k, response_channel, ..}) => {
+			assert_eq!(k as u32, lookahead_value - 1);
+			let _ = response_channel.send(Ok((0..(lookahead_value - 1)).map(|i| Hash::from_low_u64_be(i as u64)).collect()));
+		}
+	);
+}
+
 #[test]
 fn maybe_prepare_validation_golden_path() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState::default();
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
 	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(1));
-			}
-		);
+		assert_new_active_leaf_messages(&mut ctx_handle, 1).await;
 
 		assert_matches!(
 			ctx_handle.recv().await,
@@ -1834,11 +1834,10 @@ fn maybe_prepare_validation_golden_path() {
 #[test]
 fn maybe_prepare_validation_checkes_authority_once_per_session() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState {
@@ -1848,16 +1847,9 @@ fn maybe_prepare_validation_checkes_authority_once_per_session() {
 	};
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
-	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(1));
-			}
-		);
-	};
+	let test_fut = assert_new_active_leaf_messages(&mut ctx_handle, 1);
 
 	let test_fut = future::join(test_fut, check_fut);
 	executor::block_on(test_fut);
@@ -1870,11 +1862,10 @@ fn maybe_prepare_validation_checkes_authority_once_per_session() {
 #[test]
 fn maybe_prepare_validation_resets_state_on_a_new_session() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState {
@@ -1885,15 +1876,10 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() {
 	};
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
 	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(2));
-			}
-		);
+		assert_new_active_leaf_messages(&mut ctx_handle, 2).await;
 
 		assert_matches!(
 			ctx_handle.recv().await,
@@ -1923,26 +1909,18 @@ fn maybe_prepare_validation_resets_state_on_a_new_session() {
 #[test]
 fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_and_not_a_validator() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState { session_index: Some(1), ..Default::default() };
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
-	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(1));
-			}
-		);
-	};
+	let test_fut = assert_new_active_leaf_messages(&mut ctx_handle, 1);
 
 	let test_fut = future::join(test_fut, check_fut);
 	executor::block_on(test_fut);
@@ -1955,11 +1933,10 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_and_not_a_va
 #[test]
 fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_but_a_validator() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState {
@@ -1969,15 +1946,10 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_but_a_valida
 	};
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
 	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(1));
-			}
-		);
+		assert_new_active_leaf_messages(&mut ctx_handle, 1).await;
 
 		assert_matches!(
 			ctx_handle.recv().await,
@@ -2021,25 +1993,19 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_no_new_session_but_a_valida
 #[test]
 fn maybe_prepare_validation_does_not_prepare_pvfs_if_not_a_validator_in_the_next_session() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState::default();
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
 	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(1));
-			}
-		);
+		assert_new_active_leaf_messages(&mut ctx_handle, 1).await;
 
 		assert_matches!(
 			ctx_handle.recv().await,
@@ -2068,25 +2034,19 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_not_a_validator_in_the_next
 #[test]
 fn maybe_prepare_validation_does_not_prepare_pvfs_if_a_validator_in_the_current_session() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState::default();
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
 	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(1));
-			}
-		);
+		assert_new_active_leaf_messages(&mut ctx_handle, 1).await;
 
 		assert_matches!(
 			ctx_handle.recv().await,
@@ -2115,25 +2075,19 @@ fn maybe_prepare_validation_does_not_prepare_pvfs_if_a_validator_in_the_current_
 #[test]
 fn maybe_prepare_validation_prepares_a_limited_number_of_pvfs() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState { per_block_limit: 2, ..Default::default() };
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
 	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(1));
-			}
-		);
+		assert_new_active_leaf_messages(&mut ctx_handle, 1).await;
 
 		assert_matches!(
 			ctx_handle.recv().await,
@@ -2206,11 +2160,10 @@ fn maybe_prepare_validation_prepares_a_limited_number_of_pvfs() {
 #[test]
 fn maybe_prepare_validation_does_not_prepare_already_prepared_pvfs() {
 	let pool = TaskExecutor::new();
-	let (mut ctx, mut ctx_handle) =
-		polkadot_node_subsystem_test_helpers::make_subsystem_context::<AllMessages, _>(pool);
+	let (mut ctx, mut ctx_handle) = make_subsystem_context::<AllMessages, _>(pool);
 
 	let keystore = alice_keystore();
-	let backend = MockHeadsUp::default();
+	let mut backend = MockHeadsUp::default();
 	let activated_hash = Hash::random();
 	let update = dummy_active_leaves_update(activated_hash);
 	let mut state = PrepareValidationState {
@@ -2224,15 +2177,10 @@ fn maybe_prepare_validation_does_not_prepare_already_prepared_pvfs() {
 	};
 
 	let check_fut =
-		maybe_prepare_validation(ctx.sender(), keystore, backend.clone(), update, &mut state);
+		handle_active_leaves_update(ctx.sender(), keystore, &mut backend, update, &mut state);
 
 	let test_fut = async move {
-		assert_matches!(
-			ctx_handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx))) => {
-				let _ = tx.send(Ok(1));
-			}
-		);
+		assert_new_active_leaf_messages(&mut ctx_handle, 1).await;
 
 		assert_matches!(
 			ctx_handle.recv().await,
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
index 72a76537160d7ac58d250193d9a82ece619031e0..d92c98623823d1baebdef6a890eacc8f271362dc 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs
@@ -32,7 +32,7 @@
 //! The best chain contains all the candidates pending availability and a subsequent chain
 //! of candidates that have reached the backing quorum and are better than any other backable forks
 //! according to the fork selection rule (more on this rule later). It has a length of at most
-//! `max_candidate_depth + 1`.
+//! `num_of_pending_candidates + num_of_assigned_cores_for_para`.
 //!
 //! The unconnected storage keeps a record of seconded/backable candidates that may be
 //! added to the best chain in the future.
@@ -100,13 +100,10 @@
 //! bounded. This means that higher-level code needs to be selective about limiting the amount of
 //! candidates that are considered.
 //!
-//! Practically speaking, the collator-protocol will not allow more than `max_candidate_depth + 1`
-//! collations to be fetched at a relay parent and statement-distribution will not allow more than
-//! `max_candidate_depth + 1` seconded candidates at a relay parent per each validator in the
-//! backing group. Considering the `allowed_ancestry_len` configuration value, the number of
-//! candidates in a `FragmentChain` (including its unconnected storage) should not exceed:
-//!
-//! `allowed_ancestry_len * (max_candidate_depth + 1) * backing_group_size`.
+//! Practically speaking, the collator-protocol will limit the number of fetched collations per
+//! core to the number of claim queue assignments for the para id on that core.
+//! Statement-distribution will not allow more than `scheduler_params.lookahead` seconded
+//! candidates at a relay parent for each validator in the backing group.
 //!
 //! The code in this module is not designed for speed or efficiency, but conceptual simplicity.
 //! Our assumption is that the amount of candidates and parachains we consider will be reasonably
@@ -453,8 +450,8 @@ pub(crate) struct Scope {
 	pending_availability: Vec<PendingAvailability>,
 	/// The base constraints derived from the latest included candidate.
 	base_constraints: Constraints,
-	/// Equal to `max_candidate_depth`.
-	max_depth: usize,
+	/// Maximum length of the best backable chain (including candidates pending availability).
+	max_backable_len: usize,
 }
 
 /// An error variant indicating that ancestors provided to a scope
@@ -474,7 +471,8 @@ pub(crate) struct UnexpectedAncestor {
 impl Scope {
 	/// Define a new [`Scope`].
 	///
-	/// All arguments are straightforward except the ancestors.
+	/// `max_backable_len` should be the maximum length of the best backable chain (excluding
+	/// pending availability candidates).
 	///
 	/// Ancestors should be in reverse order, starting with the parent
 	/// of the `relay_parent`, and proceeding backwards in block number
@@ -492,7 +490,7 @@ impl Scope {
 		relay_parent: RelayChainBlockInfo,
 		base_constraints: Constraints,
 		pending_availability: Vec<PendingAvailability>,
-		max_depth: usize,
+		max_backable_len: usize,
 		ancestors: impl IntoIterator<Item = RelayChainBlockInfo>,
 	) -> Result<Self, UnexpectedAncestor> {
 		let mut ancestors_map = BTreeMap::new();
@@ -517,8 +515,8 @@ impl Scope {
 		Ok(Scope {
 			relay_parent,
 			base_constraints,
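+			// Candidates pending availability extend the backable chain beyond the
+			// requested maximum, so account for them in the stored bound.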
+			max_backable_len: max_backable_len + pending_availability.len(),
 			pending_availability,
-			max_depth,
 			ancestors: ancestors_map,
 			ancestors_by_hash,
 		})
@@ -1192,7 +1190,7 @@ impl FragmentChain {
 		let Some(mut earliest_rp) = self.earliest_relay_parent() else { return };
 
 		loop {
-			if self.best_chain.chain.len() > self.scope.max_depth {
+			if self.best_chain.chain.len() >= self.scope.max_backable_len {
 				break;
 			}
 
diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
index 9e7e570bd16f94b7cbdb3b0ba454ff4f983eb8f4..6bda09ecc26d4cf215b093516fcdb8f6cf112952 100644
--- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
+++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs
@@ -116,7 +116,7 @@ fn scope_rejects_ancestors_that_skip_blocks() {
 		storage_root: Hash::repeat_byte(69),
 	}];
 
-	let max_depth = 2;
+	let max_depth = 3;
 	let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into());
 	let pending_availability = Vec::new();
 
@@ -146,7 +146,7 @@ fn scope_rejects_ancestor_for_0_block() {
 		storage_root: Hash::repeat_byte(69),
 	}];
 
-	let max_depth = 2;
+	let max_depth = 3;
 	let base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into());
 	let pending_availability = Vec::new();
 
@@ -188,7 +188,7 @@ fn scope_only_takes_ancestors_up_to_min() {
 		},
 	];
 
-	let max_depth = 2;
+	let max_depth = 3;
 	let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into());
 	let pending_availability = Vec::new();
 
@@ -231,7 +231,7 @@ fn scope_rejects_unordered_ancestors() {
 		},
 	];
 
-	let max_depth = 2;
+	let max_depth = 3;
 	let base_constraints = make_constraints(0, vec![2], vec![1, 2, 3].into());
 	let pending_availability = Vec::new();
 
@@ -497,7 +497,7 @@ fn test_populate_and_check_potential() {
 				relay_parent_z_info.clone(),
 				wrong_constraints.clone(),
 				vec![],
-				4,
+				5,
 				ancestors.clone(),
 			)
 			.unwrap();
@@ -530,7 +530,7 @@ fn test_populate_and_check_potential() {
 
 	// Various depths
 	{
-		// Depth is 0, only allows one candidate, but the others will be kept as potential.
+		// Depth is 0: no candidate is allowed in the best chain, but all of them will
+		// be kept as potential.
 		let scope = Scope::with_ancestors(
 			relay_parent_z_info.clone(),
 			base_constraints.clone(),
@@ -544,6 +544,27 @@ fn test_populate_and_check_potential() {
 		assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok());
 		assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok());
 
+		let chain = populate_chain_from_previous_storage(&scope, &storage);
+		assert!(chain.best_chain_vec().is_empty());
+		assert_eq!(
+			chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(),
+			[candidate_a_hash, candidate_b_hash, candidate_c_hash].into_iter().collect()
+		);
+
+		// Depth is 1, only allows one candidate, but the others will be kept as potential.
+		let scope = Scope::with_ancestors(
+			relay_parent_z_info.clone(),
+			base_constraints.clone(),
+			vec![],
+			1,
+			ancestors.clone(),
+		)
+		.unwrap();
+		let chain = FragmentChain::init(scope.clone(), CandidateStorage::default());
+		assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok());
+		assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok());
+		assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok());
+
 		let chain = populate_chain_from_previous_storage(&scope, &storage);
 		assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash]);
 		assert_eq!(
@@ -551,12 +572,12 @@ fn test_populate_and_check_potential() {
 			[candidate_b_hash, candidate_c_hash].into_iter().collect()
 		);
 
-		// depth is 1, allows two candidates
+		// depth is 2, allows two candidates
 		let scope = Scope::with_ancestors(
 			relay_parent_z_info.clone(),
 			base_constraints.clone(),
 			vec![],
-			1,
+			2,
 			ancestors.clone(),
 		)
 		.unwrap();
@@ -572,8 +593,8 @@ fn test_populate_and_check_potential() {
 			[candidate_c_hash].into_iter().collect()
 		);
 
-		// depth is larger than 2, allows all three candidates
-		for depth in 2..6 {
+		// depth is at least 3, allows all three candidates
+		for depth in 3..6 {
 			let scope = Scope::with_ancestors(
 				relay_parent_z_info.clone(),
 				base_constraints.clone(),
@@ -605,7 +626,7 @@ fn test_populate_and_check_potential() {
 			relay_parent_z_info.clone(),
 			base_constraints.clone(),
 			vec![],
-			4,
+			5,
 			ancestors_without_x,
 		)
 		.unwrap();
@@ -628,7 +649,7 @@ fn test_populate_and_check_potential() {
 			relay_parent_z_info.clone(),
 			base_constraints.clone(),
 			vec![],
-			4,
+			5,
 			vec![],
 		)
 		.unwrap();
@@ -674,7 +695,7 @@ fn test_populate_and_check_potential() {
 			relay_parent_z_info.clone(),
 			base_constraints.clone(),
 			vec![],
-			4,
+			5,
 			ancestors.clone(),
 		)
 		.unwrap();
@@ -716,7 +737,7 @@ fn test_populate_and_check_potential() {
 		relay_parent_z_info.clone(),
 		base_constraints.clone(),
 		vec![],
-		4,
+		5,
 		ancestors.clone(),
 	)
 	.unwrap();
@@ -758,7 +779,7 @@ fn test_populate_and_check_potential() {
 		relay_parent_z_info.clone(),
 		base_constraints.clone(),
 		vec![],
-		4,
+		5,
 		ancestors.clone(),
 	)
 	.unwrap();
@@ -987,7 +1008,7 @@ fn test_populate_and_check_potential() {
 		relay_parent_z_info.clone(),
 		base_constraints.clone(),
 		vec![],
-		2,
+		3,
 		ancestors.clone(),
 	)
 	.unwrap();
@@ -1302,7 +1323,7 @@ fn test_populate_and_check_potential() {
 				relay_parent: relay_parent_z_info.clone(),
 			},
 		],
-		2,
+		0,
 		ancestors.clone(),
 	)
 	.unwrap();
@@ -1327,7 +1348,7 @@ fn test_populate_and_check_potential() {
 		relay_parent_z_info.clone(),
 		base_constraints.clone(),
 		vec![],
-		2,
+		3,
 		ancestors.clone(),
 	)
 	.unwrap();
@@ -1356,7 +1377,7 @@ fn test_populate_and_check_potential() {
 fn test_find_ancestor_path_and_find_backable_chain_empty_best_chain() {
 	let relay_parent = Hash::repeat_byte(1);
 	let required_parent: HeadData = vec![0xff].into();
-	let max_depth = 10;
+	let max_depth = 11;
 
 	// Empty chain
 	let base_constraints = make_constraints(0, vec![0], required_parent.clone());
@@ -1383,7 +1404,7 @@ fn test_find_ancestor_path_and_find_backable_chain() {
 	let para_id = ParaId::from(5u32);
 	let relay_parent = Hash::repeat_byte(1);
 	let required_parent: HeadData = vec![0xff].into();
-	let max_depth = 5;
+	let max_depth = 6;
 	let relay_parent_number = 0;
 	let relay_parent_storage_root = Hash::zero();
 
@@ -1568,7 +1589,7 @@ fn test_find_ancestor_path_and_find_backable_chain() {
 				candidate_hash: candidates[3],
 				relay_parent: relay_parent_info,
 			}],
-			max_depth,
+			max_depth - 1,
 			vec![],
 		)
 		.unwrap();
diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs
index 7416c97f3cd0251186aef8f043d09e7188148261..9b2006ee988a256c7b74aa51cb21ee88daf3f336 100644
--- a/polkadot/node/core/prospective-parachains/src/lib.rs
+++ b/polkadot/node/core/prospective-parachains/src/lib.rs
@@ -28,7 +28,7 @@
 
 #![deny(unused_crate_dependencies)]
 
-use std::collections::{HashMap, HashSet};
+use std::collections::{BTreeSet, HashMap, HashSet};
 
 use fragment_chain::CandidateStorage;
 use futures::{channel::oneshot, prelude::*};
@@ -47,10 +47,10 @@ use polkadot_node_subsystem_util::{
 	inclusion_emulator::{Constraints, RelayChainBlockInfo},
 	request_backing_constraints, request_candidates_pending_availability,
 	request_session_index_for_child,
-	runtime::{fetch_claim_queue, prospective_parachains_mode, ProspectiveParachainsMode},
+	runtime::{fetch_claim_queue, fetch_scheduling_lookahead},
 };
 use polkadot_primitives::{
-	vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState},
+	vstaging::{transpose_claim_queue, CommittedCandidateReceiptV2 as CommittedCandidateReceipt},
 	BlockNumber, CandidateHash, Hash, Header, Id as ParaId, PersistedValidationData,
 };
 
@@ -212,23 +212,8 @@ async fn handle_active_leaves_update<Context>(
 
 		let hash = activated.hash;
 
-		let mode = prospective_parachains_mode(ctx.sender(), hash)
-			.await
-			.map_err(JfyiError::Runtime)?;
-
-		let ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } = mode
-		else {
-			gum::trace!(
-				target: LOG_TARGET,
-				block_hash = ?hash,
-				"Skipping leaf activation since async backing is disabled"
-			);
-
-			// Not a part of any allowed ancestry.
-			return Ok(())
-		};
-
-		let scheduled_paras = fetch_upcoming_paras(ctx, hash).await?;
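+		// Transposing the claim queue (core -> paras becomes para -> depth -> cores)
+		// both enumerates the scheduled paras and sizes their backable chains.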
+		let transposed_claim_queue =
+			transpose_claim_queue(fetch_claim_queue(ctx.sender(), hash).await?.0);
 
 		let block_info = match fetch_block_info(ctx, &mut temp_header_cache, hash).await? {
 			None => {
@@ -246,17 +231,26 @@ async fn handle_active_leaves_update<Context>(
 			Some(info) => info,
 		};
 
+		let session_index = request_session_index_for_child(hash, ctx.sender())
+			.await
+			.await
+			.map_err(JfyiError::RuntimeApiRequestCanceled)??;
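+		// The scheduling lookahead includes the activated leaf itself, so fetch one
+		// fewer ancestor than the lookahead value.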
+		let ancestry_len = fetch_scheduling_lookahead(hash, session_index, ctx.sender())
+			.await?
+			.saturating_sub(1);
+
 		let ancestry =
-			fetch_ancestry(ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?;
+			fetch_ancestry(ctx, &mut temp_header_cache, hash, ancestry_len as usize, session_index)
+				.await?;
 
 		let prev_fragment_chains =
 			ancestry.first().and_then(|prev_leaf| view.get_fragment_chains(&prev_leaf.hash));
 
 		let mut fragment_chains = HashMap::new();
-		for para in scheduled_paras {
+		for (para, claims_by_depth) in transposed_claim_queue.iter() {
 			// Find constraints and pending availability candidates.
 			let Some((constraints, pending_availability)) =
-				fetch_backing_constraints_and_candidates(ctx, hash, para).await?
+				fetch_backing_constraints_and_candidates(ctx, hash, *para).await?
 			else {
 				// This indicates a runtime conflict of some kind.
 				gum::debug!(
@@ -306,11 +300,13 @@ async fn handle_active_leaves_update<Context>(
 				compact_pending.push(c.compact);
 			}
 
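+			// The backable chain can be at most as long as the number of distinct
+			// cores assigned to this para anywhere in the claim queue.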
+			let max_backable_chain_len =
+				claims_by_depth.values().flatten().collect::<BTreeSet<_>>().len();
 			let scope = match FragmentChainScope::with_ancestors(
 				block_info.clone().into(),
 				constraints,
 				compact_pending,
-				max_candidate_depth,
+				max_backable_chain_len,
 				ancestry
 					.iter()
 					.map(|a| RelayChainBlockInfo::from(a.clone()))
@@ -321,7 +317,7 @@ async fn handle_active_leaves_update<Context>(
 					gum::warn!(
 						target: LOG_TARGET,
 						para_id = ?para,
-						max_candidate_depth,
+						max_backable_chain_len,
 						?ancestry,
 						leaf = ?hash,
 						"Relay chain ancestors have wrong order: {:?}",
@@ -335,6 +331,7 @@ async fn handle_active_leaves_update<Context>(
 				target: LOG_TARGET,
 				relay_parent = ?hash,
 				min_relay_parent = scope.earliest_relay_parent().number,
+				max_backable_chain_len,
 				para_id = ?para,
 				ancestors = ?ancestry,
 				"Creating fragment chain"
@@ -359,7 +356,7 @@ async fn handle_active_leaves_update<Context>(
 			// If we know the previous fragment chain, use that for further populating the fragment
 			// chain.
 			if let Some(prev_fragment_chain) =
-				prev_fragment_chains.and_then(|chains| chains.get(&para))
+				prev_fragment_chains.and_then(|chains| chains.get(para))
 			{
 				chain.populate_from_previous(prev_fragment_chain);
 			}
@@ -381,7 +378,7 @@ async fn handle_active_leaves_update<Context>(
 				chain.unconnected().map(|candidate| candidate.hash()).collect::<Vec<_>>()
 			);
 
-			fragment_chains.insert(para, chain);
+			fragment_chains.insert(*para, chain);
 		}
 
 		view.per_relay_parent.insert(hash, RelayBlockViewData { fragment_chains });
@@ -950,57 +947,6 @@ async fn fetch_backing_constraints_and_candidates_inner<Context>(
 	Ok(Some((From::from(constraints), pending_availability)))
 }
 
-#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
-async fn fetch_upcoming_paras<Context>(
-	ctx: &mut Context,
-	relay_parent: Hash,
-) -> JfyiErrorResult<HashSet<ParaId>> {
-	Ok(match fetch_claim_queue(ctx.sender(), relay_parent).await? {
-		Some(claim_queue) => {
-			// Runtime supports claim queue - use it
-			claim_queue
-				.iter_all_claims()
-				.flat_map(|(_, paras)| paras.into_iter())
-				.copied()
-				.collect()
-		},
-		None => {
-			// fallback to availability cores - remove this branch once claim queue is released
-			// everywhere
-			let (tx, rx) = oneshot::channel();
-			ctx.send_message(RuntimeApiMessage::Request(
-				relay_parent,
-				RuntimeApiRequest::AvailabilityCores(tx),
-			))
-			.await;
-
-			let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??;
-
-			let mut upcoming = HashSet::with_capacity(cores.len());
-			for core in cores {
-				match core {
-					CoreState::Occupied(occupied) => {
-						// core sharing won't work optimally with this branch because the collations
-						// can't be prepared in advance.
-						if let Some(next_up_on_available) = occupied.next_up_on_available {
-							upcoming.insert(next_up_on_available.para_id);
-						}
-						if let Some(next_up_on_time_out) = occupied.next_up_on_time_out {
-							upcoming.insert(next_up_on_time_out.para_id);
-						}
-					},
-					CoreState::Scheduled(scheduled) => {
-						upcoming.insert(scheduled.para_id);
-					},
-					CoreState::Free => {},
-				}
-			}
-
-			upcoming
-		},
-	})
-}
-
 // Fetch ancestors in descending order, up to the amount requested.
 #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
 async fn fetch_ancestry<Context>(
@@ -1008,6 +954,7 @@ async fn fetch_ancestry<Context>(
 	cache: &mut HashMap<Hash, Header>,
 	relay_hash: Hash,
 	ancestors: usize,
+	required_session: u32,
 ) -> JfyiErrorResult<Vec<BlockInfo>> {
 	if ancestors == 0 {
 		return Ok(Vec::new())
@@ -1022,10 +969,6 @@ async fn fetch_ancestry<Context>(
 	.await;
 
 	let hashes = rx.map_err(JfyiError::ChainApiRequestCanceled).await??;
-	let required_session = request_session_index_for_child(relay_hash, ctx.sender())
-		.await
-		.await
-		.map_err(JfyiError::RuntimeApiRequestCanceled)??;
 
 	let mut block_info = Vec::with_capacity(hashes.len());
 	for hash in hashes {
diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs
index 5d1ef2f2f51cbeba95368e2f6767c11678817bb5..0900a3ee890005d84c25f14d4caafc9c5c8eb551 100644
--- a/polkadot/node/core/prospective-parachains/src/tests.rs
+++ b/polkadot/node/core/prospective-parachains/src/tests.rs
@@ -17,20 +17,21 @@
 use super::*;
 use assert_matches::assert_matches;
 use polkadot_node_subsystem::{
-	errors::RuntimeApiError,
 	messages::{
 		AllMessages, HypotheticalMembershipRequest, ParentHeadData, ProspectiveParachainsMessage,
 		ProspectiveValidationDataRequest,
 	},
+	RuntimeApiError,
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_primitives::{
-	async_backing::{AsyncBackingParams, Constraints, InboundHrmpLimitations},
+	async_backing::{Constraints, InboundHrmpLimitations},
 	vstaging::{
 		async_backing::{BackingState, CandidatePendingAvailability, Constraints as ConstraintsV2},
 		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2,
 	},
-	CoreIndex, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash,
+	CoreIndex, HeadData, Header, PersistedValidationData, ValidationCodeHash,
+	DEFAULT_SCHEDULING_LOOKAHEAD,
 };
 use polkadot_primitives_test_helpers::make_candidate;
 use rstest::rstest;
@@ -40,10 +41,6 @@ use std::{
 };
 use test_helpers::mock::new_leaf;
 
-const ALLOWED_ANCESTRY_LEN: u32 = 3;
-const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
-	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: ALLOWED_ANCESTRY_LEN };
-
 const RUNTIME_API_NOT_SUPPORTED: RuntimeApiError =
 	RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" };
 
@@ -113,15 +110,21 @@ impl Default for TestState {
 		let chain_b = ParaId::from(2);
 
 		let mut claim_queue = BTreeMap::new();
-		claim_queue.insert(CoreIndex(0), [chain_a].into_iter().collect());
-		claim_queue.insert(CoreIndex(1), [chain_b].into_iter().collect());
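+		// Each core's claim queue is filled to the default scheduling lookahead depth.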
+		claim_queue.insert(
+			CoreIndex(0),
+			std::iter::repeat(chain_a).take(DEFAULT_SCHEDULING_LOOKAHEAD as _).collect(),
+		);
+		claim_queue.insert(
+			CoreIndex(1),
+			std::iter::repeat(chain_b).take(DEFAULT_SCHEDULING_LOOKAHEAD as _).collect(),
+		);
 
 		let validation_code_hash = Hash::repeat_byte(42).into();
 
 		Self {
 			validation_code_hash,
 			claim_queue,
-			runtime_api_version: RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT,
+			runtime_api_version: RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT,
 		}
 	}
 }
@@ -229,15 +232,6 @@ async fn activate_leaf(
 	virtual_overseer: &mut VirtualOverseer,
 	leaf: &TestLeaf,
 	test_state: &TestState,
-) {
-	activate_leaf_with_params(virtual_overseer, leaf, test_state, ASYNC_BACKING_PARAMETERS).await;
-}
-
-async fn activate_leaf_with_parent_hash_fn(
-	virtual_overseer: &mut VirtualOverseer,
-	leaf: &TestLeaf,
-	test_state: &TestState,
-	parent_hash_fn: impl Fn(Hash) -> Hash,
 ) {
 	let TestLeaf { number, hash, .. } = leaf;
 
@@ -249,21 +243,14 @@ async fn activate_leaf_with_parent_hash_fn(
 		))))
 		.await;
 
-	handle_leaf_activation(
-		virtual_overseer,
-		leaf,
-		test_state,
-		ASYNC_BACKING_PARAMETERS,
-		parent_hash_fn,
-	)
-	.await;
+	handle_leaf_activation(virtual_overseer, leaf, test_state, get_parent_hash).await;
 }
 
-async fn activate_leaf_with_params(
+async fn activate_leaf_with_parent_hash_fn(
 	virtual_overseer: &mut VirtualOverseer,
 	leaf: &TestLeaf,
 	test_state: &TestState,
-	async_backing_params: AsyncBackingParams,
+	parent_hash_fn: impl Fn(Hash) -> Hash,
 ) {
 	let TestLeaf { number, hash, .. } = leaf;
 
@@ -275,21 +262,13 @@ async fn activate_leaf_with_params(
 		))))
 		.await;
 
-	handle_leaf_activation(
-		virtual_overseer,
-		leaf,
-		test_state,
-		async_backing_params,
-		get_parent_hash,
-	)
-	.await;
+	handle_leaf_activation(virtual_overseer, leaf, test_state, parent_hash_fn).await;
 }
 
 async fn handle_leaf_activation(
 	virtual_overseer: &mut VirtualOverseer,
 	leaf: &TestLeaf,
 	test_state: &TestState,
-	async_backing_params: AsyncBackingParams,
 	parent_hash_fn: impl Fn(Hash) -> Hash,
 ) {
 	let TestLeaf { number, hash, para_data } = leaf;
@@ -297,49 +276,31 @@ async fn handle_leaf_activation(
 	assert_matches!(
 		virtual_overseer.recv().await,
 		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
+			RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx))
 		) if parent == *hash => {
-			tx.send(Ok(async_backing_params)).unwrap();
+			tx.send(Ok(test_state.claim_queue.clone())).unwrap();
 		}
 	);
 
+	send_block_header(virtual_overseer, *hash, *number).await;
+
 	assert_matches!(
 		virtual_overseer.recv().await,
 		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx))
+			RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
 		) if parent == *hash => {
-			tx.send(
-				Ok(test_state.runtime_api_version)
-			).unwrap();
+			tx.send(Ok(1)).unwrap();
 		}
 	);
 
-	if test_state.runtime_api_version < RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT {
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx))
-			) if parent == *hash => {
-				tx.send(Ok(test_state.claim_queue.values().map(|paras| CoreState::Scheduled(
-					ScheduledCore {
-						para_id: *paras.front().unwrap(),
-						collator: None
-					}
-				)).collect())).unwrap();
-			}
-		);
-	} else {
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx))
-			) if parent == *hash => {
-				tx.send(Ok(test_state.claim_queue.clone())).unwrap();
-			}
-		);
-	}
-
-	send_block_header(virtual_overseer, *hash, *number).await;
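+	// The scheduling lookahead is requested per session, using the index returned above.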
+	assert_matches!(
+		virtual_overseer.recv().await,
+		AllMessages::RuntimeApi(
+			RuntimeApiMessage::Request(parent, RuntimeApiRequest::SchedulingLookahead(session_index, tx))
+		) if parent == *hash && session_index == 1 => {
+			tx.send(Ok(DEFAULT_SCHEDULING_LOOKAHEAD)).unwrap();
+		}
+	);
 
 	// Check that subsystem job issues a request for ancestors.
 	let min_min = para_data.iter().map(|(_, data)| data.min_relay_parent).min().unwrap_or(*number);
@@ -356,19 +317,10 @@ async fn handle_leaf_activation(
 			virtual_overseer.recv().await,
 			AllMessages::ChainApi(
 				ChainApiMessage::Ancestors{hash: block_hash, k, response_channel: tx}
-			) if block_hash == *hash && k == ALLOWED_ANCESTRY_LEN as usize => {
+			) if block_hash == *hash && k == (DEFAULT_SCHEDULING_LOOKAHEAD - 1) as usize => {
 				tx.send(Ok(ancestry_hashes.clone())).unwrap();
 			}
 		);
-
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
-			) if parent == *hash => {
-				tx.send(Ok(1)).unwrap();
-			}
-		);
 	}
 
 	let mut used_relay_parents = HashSet::new();
@@ -653,37 +605,6 @@ macro_rules! make_and_back_candidate {
 	}};
 }
 
-#[test]
-fn should_do_no_work_if_async_backing_disabled_for_leaf() {
-	async fn activate_leaf_async_backing_disabled(virtual_overseer: &mut VirtualOverseer) {
-		let hash = Hash::from_low_u64_be(130);
-
-		// Start work on some new parent.
-		virtual_overseer
-			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
-				ActiveLeavesUpdate::start_work(new_leaf(hash, 1)),
-			)))
-			.await;
-
-		assert_matches!(
-			virtual_overseer.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
-			) if parent == hash => {
-				tx.send(Err(RUNTIME_API_NOT_SUPPORTED)).unwrap();
-			}
-		);
-	}
-
-	let view = test_harness(|mut virtual_overseer| async move {
-		activate_leaf_async_backing_disabled(&mut virtual_overseer).await;
-
-		virtual_overseer
-	});
-
-	assert!(view.active_leaves.is_empty());
-}
-
 // Send some candidates and make sure all are found:
 // - Two for the same leaf A (one for parachain 1 and one for parachain 2)
 // - One for leaf B on parachain 1
@@ -869,6 +790,10 @@ fn introduce_candidates_basic(#[case] runtime_api_version: u32) {
 fn introduce_candidates_error(#[case] runtime_api_version: u32) {
 	let mut test_state = TestState::default();
 	test_state.set_runtime_api_version(runtime_api_version);
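+	// Add a second core for para 1; the allowed chain length now tracks the para's core count.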
+	test_state.claim_queue.insert(
+		CoreIndex(2),
+		std::iter::repeat(1.into()).take(DEFAULT_SCHEDULING_LOOKAHEAD as _).collect(),
+	);
 
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
@@ -882,13 +807,7 @@ fn introduce_candidates_error(#[case] runtime_api_version: u32) {
 		};
 
 		// Activate leaves.
-		activate_leaf_with_params(
-			&mut virtual_overseer,
-			&leaf_a,
-			&test_state,
-			AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 },
-		)
-		.await;
+		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
 
 		// Candidate A.
 		let (candidate_a, pvd_a) = make_candidate(
@@ -1054,7 +973,11 @@ fn introduce_candidate_multiple_times(#[case] runtime_api_version: u32) {
 
 #[test]
 fn fragment_chain_best_chain_length_is_bounded() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
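+	// Add a second core for para 1, bounding the best chain at two candidates.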
+	test_state.claim_queue.insert(
+		CoreIndex(2),
+		std::iter::repeat(1.into()).take(DEFAULT_SCHEDULING_LOOKAHEAD as _).collect(),
+	);
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
 		let leaf_a = TestLeaf {
@@ -1066,13 +989,7 @@ fn fragment_chain_best_chain_length_is_bounded() {
 			],
 		};
 		// Activate leaves.
-		activate_leaf_with_params(
-			&mut virtual_overseer,
-			&leaf_a,
-			&test_state,
-			AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 },
-		)
-		.await;
+		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
 
 		// Candidates A, B and C form a chain.
 		let (candidate_a, pvd_a) = make_candidate(
@@ -1100,7 +1017,7 @@ fn fragment_chain_best_chain_length_is_bounded() {
 			test_state.validation_code_hash,
 		);
 
-		// Introduce candidates A and B. Since max depth is 1, only these two will be allowed.
+		// Introduce candidates A and B. Since max depth is 2, only these two will be allowed.
 		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
 		introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await;
 
@@ -1447,6 +1364,13 @@ fn unconnected_candidates_become_connected(#[case] runtime_api_version: u32) {
 	// This doesn't test all the complicated cases with many unconnected candidates, as it's more
 	// extensively tested in the `fragment_chain::tests` module.
 	let mut test_state = TestState::default();
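+	// Add three more cores for para 1 so chains of up to four candidates can be backed.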
+	for i in 2..=4 {
+		test_state.claim_queue.insert(
+			CoreIndex(i),
+			std::iter::repeat(1.into()).take(DEFAULT_SCHEDULING_LOOKAHEAD as _).collect(),
+		);
+	}
+
 	test_state.set_runtime_api_version(runtime_api_version);
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
@@ -1547,7 +1471,11 @@ fn unconnected_candidates_become_connected(#[case] runtime_api_version: u32) {
 // Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate.
 #[test]
 fn check_backable_query_single_candidate() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
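+	// Add a second core for para 1 so a chain of two candidates can be backed.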
+	test_state.claim_queue.insert(
+		CoreIndex(2),
+		std::iter::repeat(1.into()).take(DEFAULT_SCHEDULING_LOOKAHEAD as _).collect(),
+	);
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
 		let leaf_a = TestLeaf {
@@ -1688,11 +1616,20 @@ fn check_backable_query_single_candidate() {
 #[rstest]
 #[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
 #[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
 fn check_backable_query_multiple_candidates(#[case] runtime_api_version: u32) {
 	// This doesn't test all the complicated cases with many unconnected candidates, as it's more
 	// extensively tested in the `fragment_chain::tests` module.
 	let mut test_state = TestState::default();
 	test_state.set_runtime_api_version(runtime_api_version);
+	// Add three more cores for para A so that we can get a chain of max length 4.
+	for i in 2..=4 {
+		test_state.claim_queue.insert(
+			CoreIndex(i),
+			std::iter::repeat(1.into()).take(DEFAULT_SCHEDULING_LOOKAHEAD as _).collect(),
+		);
+	}
+
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
 		let leaf_a = TestLeaf {
@@ -1990,20 +1927,8 @@ fn check_hypothetical_membership_query(#[case] runtime_api_version: u32) {
 		};
 
 		// Activate leaves.
-		activate_leaf_with_params(
-			&mut virtual_overseer,
-			&leaf_a,
-			&test_state,
-			AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 },
-		)
-		.await;
-		activate_leaf_with_params(
-			&mut virtual_overseer,
-			&leaf_b,
-			&test_state,
-			AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 },
-		)
-		.await;
+		activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await;
+		activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await;
 
 		// Candidates will be valid on both leaves.
 
@@ -2285,15 +2210,9 @@ fn check_pvd_query(#[case] runtime_api_version: u32) {
 
 // Test simultaneously activating and deactivating leaves, and simultaneously deactivating
 // multiple leaves.
-// This test is parametrised with the runtime api version. For versions that don't support the claim
-// queue API, we check that av-cores are used.
-#[rstest]
-#[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
-#[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
-#[case(8)]
-fn correctly_updates_leaves(#[case] runtime_api_version: u32) {
-	let mut test_state = TestState::default();
-	test_state.set_runtime_api_version(runtime_api_version);
+#[test]
+fn correctly_updates_leaves() {
+	let test_state = TestState::default();
 
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
@@ -2347,14 +2266,7 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) {
 		virtual_overseer
 			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)))
 			.await;
-		handle_leaf_activation(
-			&mut virtual_overseer,
-			&leaf_c,
-			&test_state,
-			ASYNC_BACKING_PARAMETERS,
-			get_parent_hash,
-		)
-		.await;
+		handle_leaf_activation(&mut virtual_overseer, &leaf_c, &test_state, get_parent_hash).await;
 
 		// Remove all remaining leaves.
 		let update = ActiveLeavesUpdate {
@@ -2394,17 +2306,21 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) {
 #[case(RuntimeApiRequest::CONSTRAINTS_RUNTIME_REQUIREMENT)]
 #[case(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)]
 fn handle_active_leaves_update_gets_candidates_from_parent(#[case] runtime_api_version: u32) {
+	let para_id = ParaId::from(1);
+
 	// This doesn't test all the complicated cases with many unconnected candidates, as it's more
 	// extensively tested in the `fragment_chain::tests` module.
 	let mut test_state = TestState::default();
 	test_state.set_runtime_api_version(runtime_api_version);
-	let para_id = ParaId::from(1);
-	test_state.claim_queue = test_state
-		.claim_queue
-		.into_iter()
-		.filter(|(_, paras)| matches!(paras.front(), Some(para) if para == &para_id))
-		.collect();
-	assert_eq!(test_state.claim_queue.len(), 1);
+
+	test_state.claim_queue = BTreeMap::new();
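+	// Rebuild the claim queue with five cores, all assigned to para 1.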
+	for i in 0..=4 {
+		test_state.claim_queue.insert(
+			CoreIndex(i),
+			std::iter::repeat(para_id).take(DEFAULT_SCHEDULING_LOOKAHEAD as _).collect(),
+		);
+	}
+
 	let view = test_harness(|mut virtual_overseer| async move {
 		// Leaf A
 		let leaf_a = TestLeaf {
@@ -2657,12 +2573,14 @@ fn handle_active_leaves_update_bounded_implicit_view() {
 		.collect();
 	assert_eq!(test_state.claim_queue.len(), 1);
 
+	let scheduling_lookahead = DEFAULT_SCHEDULING_LOOKAHEAD;
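+	// Besides each leaf, the implicit view retains (scheduling_lookahead - 1) ancestors.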
+
 	let mut leaves = vec![TestLeaf {
 		number: 100,
 		hash: Hash::from_low_u64_be(130),
 		para_data: vec![(
 			para_id,
-			PerParaData::new(100 - ALLOWED_ANCESTRY_LEN, HeadData(vec![1, 2, 3])),
+			PerParaData::new(100 - (scheduling_lookahead - 1), HeadData(vec![1, 2, 3])),
 		)],
 	}];
 
@@ -2674,7 +2592,7 @@ fn handle_active_leaves_update_bounded_implicit_view() {
 			para_data: vec![(
 				para_id,
 				PerParaData::new(
-					prev_leaf.number - 1 - ALLOWED_ANCESTRY_LEN,
+					prev_leaf.number - 1 - (scheduling_lookahead - 1),
 					HeadData(vec![1, 2, 3]),
 				),
 			)],
@@ -2698,16 +2616,13 @@ fn handle_active_leaves_update_bounded_implicit_view() {
 
 	// Only latest leaf is active.
 	assert_eq!(view.active_leaves.len(), 1);
-	// We keep allowed_ancestry_len implicit leaves. The latest leaf is also present here.
-	assert_eq!(
-		view.per_relay_parent.len() as u32,
-		ASYNC_BACKING_PARAMETERS.allowed_ancestry_len + 1
-	);
+	// We keep (scheduling_lookahead - 1) implicit ancestors. The latest leaf is also present here.
+	assert_eq!(view.per_relay_parent.len() as u32, scheduling_lookahead);
 
 	assert_eq!(view.active_leaves, [leaves[9].hash].into_iter().collect());
 	assert_eq!(
 		view.per_relay_parent.into_keys().collect::<HashSet<_>>(),
-		leaves[6..].into_iter().map(|l| l.hash).collect::<HashSet<_>>()
+		leaves[7..].into_iter().map(|l| l.hash).collect::<HashSet<_>>()
 	);
 }
 
@@ -2735,7 +2650,7 @@ fn persists_pending_availability_candidate(#[case] runtime_api_version: u32) {
 		let candidate_relay_parent_number = 97;
 
 		let leaf_a = TestLeaf {
-			number: candidate_relay_parent_number + ALLOWED_ANCESTRY_LEN,
+			number: candidate_relay_parent_number + DEFAULT_SCHEDULING_LOOKAHEAD,
 			hash: Hash::from_low_u64_be(2),
 			para_data: vec![(
 				para_id,
@@ -2823,102 +2738,12 @@ fn persists_pending_availability_candidate(#[case] runtime_api_version: u32) {
 	});
 }
 
-#[test]
-fn backwards_compatible_with_non_async_backing_params() {
-	let mut test_state = TestState::default();
-	let para_id = ParaId::from(1);
-	test_state.claim_queue = test_state
-		.claim_queue
-		.into_iter()
-		.filter(|(_, paras)| matches!(paras.front(), Some(para) if para == &para_id))
-		.collect();
-	assert_eq!(test_state.claim_queue.len(), 1);
-
-	test_harness(|mut virtual_overseer| async move {
-		let para_head = HeadData(vec![1, 2, 3]);
-
-		let leaf_b_hash = Hash::repeat_byte(15);
-		let candidate_relay_parent = get_parent_hash(leaf_b_hash);
-		let candidate_relay_parent_number = 100;
-
-		let leaf_a = TestLeaf {
-			number: candidate_relay_parent_number,
-			hash: candidate_relay_parent,
-			para_data: vec![(
-				para_id,
-				PerParaData::new(candidate_relay_parent_number, para_head.clone()),
-			)],
-		};
-
-		// Activate leaf.
-		activate_leaf_with_params(
-			&mut virtual_overseer,
-			&leaf_a,
-			&test_state,
-			AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0 },
-		)
-		.await;
-
-		// Candidate A
-		let (candidate_a, pvd_a) = make_candidate(
-			candidate_relay_parent,
-			candidate_relay_parent_number,
-			para_id,
-			para_head.clone(),
-			HeadData(vec![1]),
-			test_state.validation_code_hash,
-		);
-		let candidate_hash_a = candidate_a.hash();
-
-		introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await;
-		back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await;
-
-		get_backable_candidates(
-			&mut virtual_overseer,
-			&leaf_a,
-			para_id,
-			Ancestors::new(),
-			1,
-			vec![(candidate_hash_a, candidate_relay_parent)],
-		)
-		.await;
-
-		let leaf_b = TestLeaf {
-			number: candidate_relay_parent_number + 1,
-			hash: leaf_b_hash,
-			para_data: vec![(
-				para_id,
-				PerParaData::new(candidate_relay_parent_number + 1, para_head.clone()),
-			)],
-		};
-		activate_leaf_with_params(
-			&mut virtual_overseer,
-			&leaf_b,
-			&test_state,
-			AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0 },
-		)
-		.await;
-
-		get_backable_candidates(
-			&mut virtual_overseer,
-			&leaf_b,
-			para_id,
-			Ancestors::new(),
-			1,
-			vec![],
-		)
-		.await;
-
-		virtual_overseer
-	});
-}
-
 #[test]
 fn uses_ancestry_only_within_session() {
 	test_harness(|mut virtual_overseer| async move {
 		let number = 5;
 		let hash = Hash::repeat_byte(5);
-		let ancestry_len = 3;
+		let scheduling_lookahead = DEFAULT_SCHEDULING_LOOKAHEAD;
 		let session = 2;
 
 		let ancestry_hashes =
@@ -2933,51 +2758,41 @@ fn uses_ancestry_only_within_session() {
 			)))
 			.await;
 
-		assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(
-				parent,
-				RuntimeApiRequest::AsyncBackingParams(tx)
-			)) if parent == hash => {
-				tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len})).unwrap();
-		});
-
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx))
+				RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx))
 			) if parent == hash => {
-				tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap();
+				tx.send(Ok(BTreeMap::new())).unwrap();
 			}
 		);
 
+		send_block_header(&mut virtual_overseer, hash, number).await;
+
 		assert_matches!(
 			virtual_overseer.recv().await,
 			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::ClaimQueue(tx))
+				RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
 			) if parent == hash => {
-				tx.send(Ok(BTreeMap::new())).unwrap();
+				tx.send(Ok(session)).unwrap();
 			}
 		);
 
-		send_block_header(&mut virtual_overseer, hash, number).await;
-
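+		// The lookahead request is keyed by the session index just reported.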
 		assert_matches!(
 			virtual_overseer.recv().await,
-			AllMessages::ChainApi(
-				ChainApiMessage::Ancestors{hash: block_hash, k, response_channel: tx}
-			) if block_hash == hash && k == ancestry_len as usize => {
-				tx.send(Ok(ancestry_hashes.clone())).unwrap();
+			AllMessages::RuntimeApi(
+				RuntimeApiMessage::Request(parent, RuntimeApiRequest::SchedulingLookahead(session_index, tx))
+			) if parent == hash && session_index == session => {
+				tx.send(Ok(scheduling_lookahead)).unwrap();
 			}
 		);
 
 		assert_matches!(
 			virtual_overseer.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))
-			) if parent == hash => {
-				tx.send(Ok(session)).unwrap();
+			AllMessages::ChainApi(
+				ChainApiMessage::Ancestors{hash: block_hash, k, response_channel: tx}
+			) if block_hash == hash && k == (scheduling_lookahead - 1) as usize => {
+				tx.send(Ok(ancestry_hashes.clone())).unwrap();
 			}
 		);
 
diff --git a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs
index 8c0d478b67df4e65553ab5ed6614ff1a920d7491..1f814989fc6a85fc104d0f565402fcf09388391a 100644
--- a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs
+++ b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/tests.rs
@@ -26,8 +26,8 @@ use polkadot_node_subsystem::messages::{
 };
 use polkadot_node_subsystem_test_helpers::{mock::new_leaf, TestSubsystemSender};
 use polkadot_primitives::{
-	CandidateHash, DisputeState, InvalidDisputeStatementKind, SessionIndex,
-	ValidDisputeStatementKind, ValidatorSignature,
+	vstaging::CandidateReceiptV2 as CandidateReceipt, CandidateHash, DisputeState,
+	InvalidDisputeStatementKind, SessionIndex, ValidDisputeStatementKind, ValidatorSignature,
 };
 
 //
diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs
index a95df6c5f8808950f4531c0ba5f20cac07a959ab..4aced8eaefdfe09e294a97e7cfce2840b0583cca 100644
--- a/polkadot/node/core/provisioner/src/lib.rs
+++ b/polkadot/node/core/provisioner/src/lib.rs
@@ -28,23 +28,21 @@ use schnellru::{ByLength, LruMap};
 
 use polkadot_node_subsystem::{
 	messages::{
-		Ancestors, CandidateBackingMessage, ChainApiMessage, ProspectiveParachainsMessage,
-		ProvisionableData, ProvisionerInherentData, ProvisionerMessage, RuntimeApiRequest,
+		Ancestors, CandidateBackingMessage, ProspectiveParachainsMessage, ProvisionableData,
+		ProvisionerInherentData, ProvisionerMessage,
 	},
 	overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem,
 	SubsystemError,
 };
 use polkadot_node_subsystem_util::{
-	has_required_runtime, request_availability_cores, request_persisted_validation_data,
-	request_session_index_for_child,
-	runtime::{prospective_parachains_mode, request_node_features, ProspectiveParachainsMode},
+	request_availability_cores, request_session_index_for_child, runtime::request_node_features,
 	TimeoutExt,
 };
 use polkadot_primitives::{
 	node_features::FeatureIndex,
-	vstaging::{BackedCandidate, CandidateReceiptV2 as CandidateReceipt, CoreState},
-	BlockNumber, CandidateHash, CoreIndex, Hash, Id as ParaId, NodeFeatures,
-	OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex,
+	vstaging::{BackedCandidate, CoreState},
+	CandidateHash, CoreIndex, Hash, Id as ParaId, NodeFeatures, SessionIndex,
+	SignedAvailabilityBitfield, ValidatorIndex,
 };
 use std::collections::{BTreeMap, HashMap};
 
@@ -65,9 +63,6 @@ const SEND_INHERENT_DATA_TIMEOUT: std::time::Duration = core::time::Duration::fr
 
 const LOG_TARGET: &str = "parachain::provisioner";
 
-const PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT: u32 =
-	RuntimeApiRequest::DISPUTES_RUNTIME_REQUIREMENT;
-
 /// The provisioner subsystem.
 pub struct ProvisionerSubsystem {
 	metrics: Metrics,
@@ -82,15 +77,12 @@ impl ProvisionerSubsystem {
 
 /// Per-session info we need for the provisioner subsystem.
 pub struct PerSession {
-	prospective_parachains_mode: ProspectiveParachainsMode,
 	elastic_scaling_mvp: bool,
 }
 
 /// A per-relay-parent state for the provisioning subsystem.
 pub struct PerRelayParent {
 	leaf: ActivatedLeaf,
-	backed_candidates: Vec<CandidateReceipt>,
-	prospective_parachains_mode: ProspectiveParachainsMode,
 	elastic_scaling_mvp: bool,
 	signed_bitfields: Vec<SignedAvailabilityBitfield>,
 	is_inherent_ready: bool,
@@ -101,8 +93,6 @@ impl PerRelayParent {
 	fn new(leaf: ActivatedLeaf, per_session: &PerSession) -> Self {
 		Self {
 			leaf,
-			backed_candidates: Vec::new(),
-			prospective_parachains_mode: per_session.prospective_parachains_mode,
 			elastic_scaling_mvp: per_session.elastic_scaling_mvp,
 			signed_bitfields: Vec::new(),
 			is_inherent_ready: false,
@@ -212,8 +202,6 @@ async fn handle_active_leaves_update(
 			.await
 			.map_err(Error::CanceledSessionIndex)??;
 		if per_session.get(&session_index).is_none() {
-			let prospective_parachains_mode =
-				prospective_parachains_mode(sender, leaf.hash).await?;
 			let elastic_scaling_mvp = request_node_features(leaf.hash, session_index, sender)
 				.await?
 				.unwrap_or(NodeFeatures::EMPTY)
@@ -221,10 +209,7 @@ async fn handle_active_leaves_update(
 				.map(|b| *b)
 				.unwrap_or(false);
 
-			per_session.insert(
-				session_index,
-				PerSession { prospective_parachains_mode, elastic_scaling_mvp },
-			);
+			per_session.insert(session_index, PerSession { elastic_scaling_mvp });
 		}
 
 		let session_info = per_session.get(&session_index).expect("Just inserted");
@@ -287,8 +272,6 @@ async fn send_inherent_data_bg<Context>(
 ) -> Result<(), Error> {
 	let leaf = per_relay_parent.leaf.clone();
 	let signed_bitfields = per_relay_parent.signed_bitfields.clone();
-	let backed_candidates = per_relay_parent.backed_candidates.clone();
-	let mode = per_relay_parent.prospective_parachains_mode;
 	let elastic_scaling_mvp = per_relay_parent.elastic_scaling_mvp;
 
 	let mut sender = ctx.sender().clone();
@@ -305,8 +288,6 @@ async fn send_inherent_data_bg<Context>(
 		let send_result = send_inherent_data(
 			&leaf,
 			&signed_bitfields,
-			&backed_candidates,
-			mode,
 			elastic_scaling_mvp,
 			return_senders,
 			&mut sender,
@@ -357,16 +338,6 @@ fn note_provisionable_data(
 	match provisionable_data {
 		ProvisionableData::Bitfield(_, signed_bitfield) =>
 			per_relay_parent.signed_bitfields.push(signed_bitfield),
-		ProvisionableData::BackedCandidate(backed_candidate) => {
-			let candidate_hash = backed_candidate.hash();
-			gum::trace!(
-				target: LOG_TARGET,
-				?candidate_hash,
-				para = ?backed_candidate.descriptor().para_id(),
-				"noted backed candidate",
-			);
-			per_relay_parent.backed_candidates.push(backed_candidate);
-		},
 		// We choose not to punish these forms of misbehavior for the time being.
 		// Risks from misbehavior are sufficiently mitigated at the protocol level
 		// via reputation changes. Punitive actions here may become desirable
@@ -412,8 +383,6 @@ type CoreAvailability = BitVec<u8, bitvec::order::Lsb0>;
 async fn send_inherent_data(
 	leaf: &ActivatedLeaf,
 	bitfields: &[SignedAvailabilityBitfield],
-	candidates: &[CandidateReceipt],
-	prospective_parachains_mode: ProspectiveParachainsMode,
 	elastic_scaling_mvp: bool,
 	return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
 	from_job: &mut impl overseer::ProvisionerSenderTrait,
@@ -435,16 +404,6 @@ async fn send_inherent_data(
 		"Selecting disputes"
 	);
 
-	debug_assert!(
-		has_required_runtime(
-			from_job,
-			leaf.hash,
-			PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT,
-		)
-		.await,
-		"randomized selection no longer supported, please upgrade your runtime!"
-	);
-
 	let disputes = disputes::prioritized_selection::select_disputes(from_job, metrics, leaf).await;
 
 	gum::trace!(
@@ -461,16 +420,9 @@ async fn send_inherent_data(
 		"Selected bitfields"
 	);
 
-	let candidates = select_candidates(
-		&availability_cores,
-		&bitfields,
-		candidates,
-		prospective_parachains_mode,
-		elastic_scaling_mvp,
-		leaf.hash,
-		from_job,
-	)
-	.await?;
+	let candidates =
+		select_candidates(&availability_cores, &bitfields, elastic_scaling_mvp, leaf, from_job)
+			.await?;
 
 	gum::trace!(
 		target: LOG_TARGET,
@@ -577,114 +529,16 @@ fn select_availability_bitfields(
 	selected.into_values().collect()
 }
 
-/// Selects candidates from tracked ones to note in a relay chain block.
-///
-/// Should be called when prospective parachains are disabled.
-async fn select_candidate_hashes_from_tracked(
-	availability_cores: &[CoreState],
-	bitfields: &[SignedAvailabilityBitfield],
-	candidates: &[CandidateReceipt],
-	relay_parent: Hash,
-	sender: &mut impl overseer::ProvisionerSenderTrait,
-) -> Result<HashMap<ParaId, Vec<(CandidateHash, Hash)>>, Error> {
-	let block_number = get_block_number_under_construction(relay_parent, sender).await?;
-
-	let mut selected_candidates =
-		HashMap::with_capacity(candidates.len().min(availability_cores.len()));
-
-	gum::debug!(
-		target: LOG_TARGET,
-		leaf_hash=?relay_parent,
-		n_candidates = candidates.len(),
-		"Candidate receipts (before selection)",
-	);
-
-	for (core_idx, core) in availability_cores.iter().enumerate() {
-		let (scheduled_core, assumption) = match core {
-			CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free),
-			CoreState::Occupied(occupied_core) => {
-				if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability)
-				{
-					if let Some(ref scheduled_core) = occupied_core.next_up_on_available {
-						(scheduled_core, OccupiedCoreAssumption::Included)
-					} else {
-						continue
-					}
-				} else {
-					if occupied_core.time_out_at != block_number {
-						continue
-					}
-					if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out {
-						(scheduled_core, OccupiedCoreAssumption::TimedOut)
-					} else {
-						continue
-					}
-				}
-			},
-			CoreState::Free => continue,
-		};
-
-		if selected_candidates.contains_key(&scheduled_core.para_id) {
-			// We already picked a candidate for this parachain. Elastic scaling only works with
-			// prospective parachains mode.
-			continue
-		}
-
-		let validation_data = match request_persisted_validation_data(
-			relay_parent,
-			scheduled_core.para_id,
-			assumption,
-			sender,
-		)
-		.await
-		.await
-		.map_err(|err| Error::CanceledPersistedValidationData(err))??
-		{
-			Some(v) => v,
-			None => continue,
-		};
-
-		let computed_validation_data_hash = validation_data.hash();
-
-		// we arbitrarily pick the first of the backed candidates which match the appropriate
-		// selection criteria
-		if let Some(candidate) = candidates.iter().find(|backed_candidate| {
-			let descriptor = &backed_candidate.descriptor;
-			descriptor.para_id() == scheduled_core.para_id &&
-				descriptor.persisted_validation_data_hash() == computed_validation_data_hash
-		}) {
-			let candidate_hash = candidate.hash();
-			gum::trace!(
-				target: LOG_TARGET,
-				leaf_hash=?relay_parent,
-				?candidate_hash,
-				para = ?candidate.descriptor.para_id(),
-				core = core_idx,
-				"Selected candidate receipt",
-			);
-
-			selected_candidates.insert(
-				candidate.descriptor.para_id(),
-				vec![(candidate_hash, candidate.descriptor.relay_parent())],
-			);
-		}
-	}
-
-	Ok(selected_candidates)
-}
-
 /// Requests backable candidates from Prospective Parachains subsystem
 /// based on core states.
-///
-/// Should be called when prospective parachains are enabled.
 async fn request_backable_candidates(
 	availability_cores: &[CoreState],
 	elastic_scaling_mvp: bool,
 	bitfields: &[SignedAvailabilityBitfield],
-	relay_parent: Hash,
+	relay_parent: &ActivatedLeaf,
 	sender: &mut impl overseer::ProvisionerSenderTrait,
 ) -> Result<HashMap<ParaId, Vec<(CandidateHash, Hash)>>, Error> {
-	let block_number = get_block_number_under_construction(relay_parent, sender).await?;
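+	// The leaf's number is known locally, so no ChainApi round trip is needed here.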
+	let block_number_under_construction = relay_parent.number + 1;
 
 	// Record how many cores are scheduled for each paraid. Use a BTreeMap because
 	// we'll need to iterate through them.
@@ -716,7 +570,7 @@ async fn request_backable_candidates(
 						// Request a new backable candidate for the newly scheduled para id.
 						*scheduled_cores_per_para.entry(scheduled_core.para_id).or_insert(0) += 1;
 					}
-				} else if occupied_core.time_out_at <= block_number {
+				} else if occupied_core.time_out_at <= block_number_under_construction {
 					// Timed out before being available.
 
 					if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out {
@@ -747,7 +601,7 @@ async fn request_backable_candidates(
 		}
 
 		let response = get_backable_candidates(
-			relay_parent,
+			relay_parent.hash,
 			para_id,
 			para_ancestors,
 			core_count as u32,
@@ -758,7 +612,7 @@ async fn request_backable_candidates(
 		if response.is_empty() {
 			gum::debug!(
 				target: LOG_TARGET,
-				leaf_hash = ?relay_parent,
+				leaf_hash = ?relay_parent.hash,
 				?para_id,
 				"No backable candidate returned by prospective parachains",
 			);
@@ -776,38 +630,26 @@ async fn request_backable_candidates(
 async fn select_candidates(
 	availability_cores: &[CoreState],
 	bitfields: &[SignedAvailabilityBitfield],
-	candidates: &[CandidateReceipt],
-	prospective_parachains_mode: ProspectiveParachainsMode,
 	elastic_scaling_mvp: bool,
-	relay_parent: Hash,
+	leaf: &ActivatedLeaf,
 	sender: &mut impl overseer::ProvisionerSenderTrait,
 ) -> Result<Vec<BackedCandidate>, Error> {
+	let relay_parent = leaf.hash;
 	gum::trace!(
 		target: LOG_TARGET,
 		leaf_hash=?relay_parent,
 		"before GetBackedCandidates"
 	);
 
-	let selected_candidates = match prospective_parachains_mode {
-		ProspectiveParachainsMode::Enabled { .. } =>
-			request_backable_candidates(
-				availability_cores,
-				elastic_scaling_mvp,
-				bitfields,
-				relay_parent,
-				sender,
-			)
-			.await?,
-		ProspectiveParachainsMode::Disabled =>
-			select_candidate_hashes_from_tracked(
-				availability_cores,
-				bitfields,
-				&candidates,
-				relay_parent,
-				sender,
-			)
-			.await?,
-	};
+	let selected_candidates = request_backable_candidates(
+		availability_cores,
+		elastic_scaling_mvp,
+		bitfields,
+		leaf,
+		sender,
+	)
+	.await?;
+
 	gum::debug!(target: LOG_TARGET, ?selected_candidates, "Got backable candidates");
 
 	// now get the backed candidates corresponding to these candidate receipts
@@ -817,8 +659,11 @@ async fn select_candidates(
 		tx,
 	));
 	let candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?;
-	gum::trace!(target: LOG_TARGET, leaf_hash=?relay_parent,
-				"Got {} backed candidates", candidates.len());
+	gum::trace!(
+		target: LOG_TARGET,
+		leaf_hash=?relay_parent,
+		"Got {} backed candidates", candidates.len()
+	);
 
 	// keep only one candidate with validation code.
 	let mut with_validation_code = false;
@@ -850,22 +695,6 @@ async fn select_candidates(
 	Ok(merged_candidates)
 }
 
-/// Produces a block number 1 higher than that of the relay parent
-/// in the event of an invalid `relay_parent`, returns `Ok(0)`
-async fn get_block_number_under_construction(
-	relay_parent: Hash,
-	sender: &mut impl overseer::ProvisionerSenderTrait,
-) -> Result<BlockNumber, Error> {
-	let (tx, rx) = oneshot::channel();
-	sender.send_message(ChainApiMessage::BlockNumber(relay_parent, tx)).await;
-
-	match rx.await.map_err(|err| Error::CanceledBlockNumber(err))? {
-		Ok(Some(n)) => Ok(n + 1),
-		Ok(None) => Ok(0),
-		Err(err) => Err(err.into()),
-	}
-}
-
 /// Requests backable candidates from Prospective Parachains based on
 /// the given ancestors in the fragment chain. The ancestors may not be ordered.
 async fn get_backable_candidates(
diff --git a/polkadot/node/core/provisioner/src/tests.rs b/polkadot/node/core/provisioner/src/tests.rs
index a09b243f3ab1338ecff24ba239978d60f2acaebc..4667f44d65a0310fbf3e32d712dcdbc7aed31f0a 100644
--- a/polkadot/node/core/provisioner/src/tests.rs
+++ b/polkadot/node/core/provisioner/src/tests.rs
@@ -254,10 +254,12 @@ mod select_candidates {
 			AvailabilityCores, PersistedValidationData as PersistedValidationDataReq,
 		},
 	};
-	use polkadot_node_subsystem_test_helpers::TestSubsystemSender;
-	use polkadot_node_subsystem_util::runtime::ProspectiveParachainsMode;
+	use polkadot_node_subsystem_test_helpers::{mock::new_leaf, TestSubsystemSender};
 	use polkadot_primitives::{
-		vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2},
+		vstaging::{
+			CandidateReceiptV2 as CandidateReceipt,
+			CommittedCandidateReceiptV2 as CommittedCandidateReceipt, MutateDescriptorV2,
+		},
 		BlockNumber, CandidateCommitments, PersistedValidationData,
 	};
 	use polkadot_primitives_test_helpers::{dummy_candidate_descriptor_v2, dummy_hash};
@@ -557,9 +559,7 @@ mod select_candidates {
 		mock_availability_cores: Vec<CoreState>,
 		mut expected: Vec<BackedCandidate>,
 		mut expected_ancestors: HashMap<Vec<CandidateHash>, Ancestors>,
-		prospective_parachains_mode: ProspectiveParachainsMode,
 	) {
-		use ChainApiMessage::BlockNumber;
 		use RuntimeApiMessage::Request;
 
 		let mut backed = expected.clone().into_iter().fold(HashMap::new(), |mut acc, candidate| {
@@ -574,8 +574,6 @@ mod select_candidates {
 
 		while let Some(from_job) = receiver.next().await {
 			match from_job {
-				AllMessages::ChainApi(BlockNumber(_relay_parent, tx)) =>
-					tx.send(Ok(Some(BLOCK_UNDER_PRODUCTION - 1))).unwrap(),
 				AllMessages::RuntimeApi(Request(
 					_parent_hash,
 					PersistedValidationDataReq(_para_id, _assumption, tx),
@@ -624,175 +622,57 @@ mod select_candidates {
 						actual_ancestors,
 						tx,
 					),
-				) => match prospective_parachains_mode {
-					ProspectiveParachainsMode::Enabled { .. } => {
-						assert!(count > 0);
-						let candidates =
-							(&mut candidates_iter).take(count as usize).collect::<Vec<_>>();
-						assert_eq!(candidates.len(), count as usize);
-
-						if !expected_ancestors.is_empty() {
-							if let Some(expected_required_ancestors) = expected_ancestors.remove(
-								&(candidates
-									.clone()
-									.into_iter()
-									.take(actual_ancestors.len())
-									.map(|(c_hash, _)| c_hash)
-									.collect::<Vec<_>>()),
-							) {
-								assert_eq!(expected_required_ancestors, actual_ancestors);
-							} else {
-								assert_eq!(actual_ancestors.len(), 0);
-							}
+				) => {
+					assert!(count > 0);
+					let candidates =
+						(&mut candidates_iter).take(count as usize).collect::<Vec<_>>();
+					assert_eq!(candidates.len(), count as usize);
+
+					if !expected_ancestors.is_empty() {
+						if let Some(expected_required_ancestors) = expected_ancestors.remove(
+							&(candidates
+								.clone()
+								.into_iter()
+								.take(actual_ancestors.len())
+								.map(|(c_hash, _)| c_hash)
+								.collect::<Vec<_>>()),
+						) {
+							assert_eq!(expected_required_ancestors, actual_ancestors);
+						} else {
+							assert_eq!(actual_ancestors.len(), 0);
 						}
+					}
 
-						let _ = tx.send(candidates);
-					},
-					ProspectiveParachainsMode::Disabled =>
-						panic!("unexpected prospective parachains request"),
+					let _ = tx.send(candidates);
 				},
 				_ => panic!("Unexpected message: {:?}", from_job),
 			}
 		}
 
-		if let ProspectiveParachainsMode::Enabled { .. } = prospective_parachains_mode {
-			assert_eq!(candidates_iter.next(), None);
-		}
+		assert_eq!(candidates_iter.next(), None);
 		assert_eq!(expected_ancestors.len(), 0);
 	}
 
-	#[rstest]
-	#[case(ProspectiveParachainsMode::Disabled)]
-	#[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})]
-	fn can_succeed(#[case] prospective_parachains_mode: ProspectiveParachainsMode) {
+	#[test]
+	fn can_succeed() {
 		test_harness(
-			|r| {
-				mock_overseer(
-					r,
-					Vec::new(),
-					Vec::new(),
-					HashMap::new(),
-					prospective_parachains_mode,
-				)
-			},
+			|r| mock_overseer(r, Vec::new(), Vec::new(), HashMap::new()),
 			|mut tx: TestSubsystemSender| async move {
 				select_candidates(
 					&[],
 					&[],
-					&[],
-					prospective_parachains_mode,
 					false,
-					Default::default(),
-					&mut tx,
-				)
-				.await
-				.unwrap();
-			},
-		)
-	}
-
-	// Test candidate selection when prospective parachains mode is disabled.
-	// This tests that only the appropriate candidates get selected when prospective parachains mode
-	// is disabled. To accomplish this, we supply a candidate list containing one candidate per
-	// possible core; the candidate selection algorithm must filter them to the appropriate set
-	#[rstest]
-	// why those particular indices? see the comments on mock_availability_cores_*() functions.
-	#[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], true)]
-	#[case(mock_availability_cores_one_per_para(), vec![1, 4, 7, 8, 10], false)]
-	#[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], true)]
-	#[case(mock_availability_cores_multiple_per_para(), vec![1, 4, 7, 8, 10, 12, 13, 14, 15], false)]
-	fn test_in_subsystem_selection(
-		#[case] mock_cores: Vec<CoreState>,
-		#[case] expected_candidates: Vec<usize>,
-		#[case] elastic_scaling_mvp: bool,
-	) {
-		let candidate_template = dummy_candidate_template();
-		let candidates: Vec<_> = std::iter::repeat(candidate_template)
-			.take(mock_cores.len())
-			.enumerate()
-			.map(|(idx, mut candidate)| {
-				candidate.descriptor.set_para_id(idx.into());
-				candidate
-			})
-			.cycle()
-			.take(mock_cores.len() * 3)
-			.enumerate()
-			.map(|(idx, mut candidate)| {
-				if idx < mock_cores.len() {
-					// first go-around: use candidates which should work
-					candidate
-				} else if idx < mock_cores.len() * 2 {
-					// for the second repetition of the candidates, give them the wrong hash
-					candidate.descriptor.set_persisted_validation_data_hash(Default::default());
-					candidate
-				} else {
-					// third go-around: right hash, wrong para_id
-					candidate.descriptor.set_para_id(idx.into());
-					candidate
-				}
-			})
-			.collect();
-
-		let expected_candidates: Vec<_> =
-			expected_candidates.into_iter().map(|idx| candidates[idx].clone()).collect();
-		let prospective_parachains_mode = ProspectiveParachainsMode::Disabled;
-
-		let expected_backed = expected_candidates
-			.iter()
-			.map(|c| {
-				BackedCandidate::new(
-					CommittedCandidateReceipt {
-						descriptor: c.descriptor().clone(),
-						commitments: Default::default(),
-					},
-					Vec::new(),
-					default_bitvec(MOCK_GROUP_SIZE),
-					None,
-				)
-			})
-			.collect();
-
-		let mock_cores_clone = mock_cores.clone();
-		test_harness(
-			|r| {
-				mock_overseer(
-					r,
-					mock_cores_clone,
-					expected_backed,
-					HashMap::new(),
-					prospective_parachains_mode,
-				)
-			},
-			|mut tx: TestSubsystemSender| async move {
-				let result: Vec<BackedCandidate> = select_candidates(
-					&mock_cores,
-					&[],
-					&candidates,
-					prospective_parachains_mode,
-					elastic_scaling_mvp,
-					Default::default(),
+					&new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1),
 					&mut tx,
 				)
 				.await
 				.unwrap();
-
-				result.into_iter().for_each(|c| {
-					assert!(
-						expected_candidates.iter().any(|c2| c.candidate().corresponds_to(c2)),
-						"Failed to find candidate: {:?}",
-						c,
-					)
-				});
 			},
 		)
 	}
 
-	#[rstest]
-	#[case(ProspectiveParachainsMode::Disabled)]
-	#[case(ProspectiveParachainsMode::Enabled {max_candidate_depth: 0, allowed_ancestry_len: 0})]
-	fn selects_max_one_code_upgrade_one_core_per_para(
-		#[case] prospective_parachains_mode: ProspectiveParachainsMode,
-	) {
+	#[test]
+	fn selects_max_one_code_upgrade_one_core_per_para() {
 		let mock_cores = mock_availability_cores_one_per_para();
 
 		let empty_hash = PersistedValidationData::<Hash, BlockNumber>::default().hash();
@@ -855,23 +735,13 @@ mod select_candidates {
 		let mock_cores_clone = mock_cores.clone();
 
 		test_harness(
-			|r| {
-				mock_overseer(
-					r,
-					mock_cores_clone,
-					expected_backed,
-					HashMap::new(),
-					prospective_parachains_mode,
-				)
-			},
+			|r| mock_overseer(r, mock_cores_clone, expected_backed, HashMap::new()),
 			|mut tx: TestSubsystemSender| async move {
 				let result = select_candidates(
 					&mock_cores,
 					&[],
-					&candidates,
-					prospective_parachains_mode,
 					false,
-					Default::default(),
+					&new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1),
 					&mut tx,
 				)
 				.await
@@ -890,8 +760,6 @@ mod select_candidates {
 
 	#[test]
 	fn selects_max_one_code_upgrade_multiple_cores_per_para() {
-		let prospective_parachains_mode =
-			ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
 		let mock_cores = vec![
 			// 0: Scheduled(default),
 			Scheduled(scheduled_core(1)),
@@ -970,23 +838,13 @@ mod select_candidates {
 		let mock_cores_clone = mock_cores.clone();
 
 		test_harness(
-			|r| {
-				mock_overseer(
-					r,
-					mock_cores_clone,
-					expected_backed,
-					HashMap::new(),
-					prospective_parachains_mode,
-				)
-			},
+			|r| mock_overseer(r, mock_cores_clone, expected_backed, HashMap::new()),
 			|mut tx: TestSubsystemSender| async move {
 				let result = select_candidates(
 					&mock_cores,
 					&[],
-					&candidates,
-					prospective_parachains_mode,
 					true,
-					Default::default(),
+					&new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1),
 					&mut tx,
 				)
 				.await
@@ -1004,7 +862,7 @@ mod select_candidates {
 	#[rstest]
 	#[case(true)]
 	#[case(false)]
-	fn request_from_prospective_parachains_one_core_per_para(#[case] elastic_scaling_mvp: bool) {
+	fn one_core_per_para(#[case] elastic_scaling_mvp: bool) {
 		let mock_cores = mock_availability_cores_one_per_para();
 
 		// why those particular indices? see the comments on mock_availability_cores()
@@ -1012,10 +870,6 @@ mod select_candidates {
 		let (candidates, expected_candidates) =
 			make_candidates(mock_cores.len() + 1, expected_candidates);
 
-		// Expect prospective parachains subsystem requests.
-		let prospective_parachains_mode =
-			ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
-
 		let mut required_ancestors: HashMap<Vec<CandidateHash>, Ancestors> = HashMap::new();
 		required_ancestors.insert(
 			vec![candidates[4]],
@@ -1029,23 +883,13 @@ mod select_candidates {
 		let mock_cores_clone = mock_cores.clone();
 		let expected_candidates_clone = expected_candidates.clone();
 		test_harness(
-			|r| {
-				mock_overseer(
-					r,
-					mock_cores_clone,
-					expected_candidates_clone,
-					required_ancestors,
-					prospective_parachains_mode,
-				)
-			},
+			|r| mock_overseer(r, mock_cores_clone, expected_candidates_clone, required_ancestors),
 			|mut tx: TestSubsystemSender| async move {
 				let result = select_candidates(
 					&mock_cores,
 					&[],
-					&[],
-					prospective_parachains_mode,
 					elastic_scaling_mvp,
-					Default::default(),
+					&new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1),
 					&mut tx,
 				)
 				.await
@@ -1066,15 +910,12 @@ mod select_candidates {
 	}
 
 	#[test]
-	fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp() {
+	fn multiple_cores_per_para_elastic_scaling_mvp() {
 		let mock_cores = mock_availability_cores_multiple_per_para();
 
 		// why those particular indices? see the comments on mock_availability_cores()
 		let expected_candidates: Vec<_> =
 			vec![1, 4, 7, 8, 10, 12, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15];
-		// Expect prospective parachains subsystem requests.
-		let prospective_parachains_mode =
-			ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
 
 		let (candidates, expected_candidates) =
 			make_candidates(mock_cores.len(), expected_candidates);
@@ -1119,23 +960,13 @@ mod select_candidates {
 		let mock_cores_clone = mock_cores.clone();
 		let expected_candidates_clone = expected_candidates.clone();
 		test_harness(
-			|r| {
-				mock_overseer(
-					r,
-					mock_cores_clone,
-					expected_candidates,
-					required_ancestors,
-					prospective_parachains_mode,
-				)
-			},
+			|r| mock_overseer(r, mock_cores_clone, expected_candidates, required_ancestors),
 			|mut tx: TestSubsystemSender| async move {
 				let result = select_candidates(
 					&mock_cores,
 					&[],
-					&[],
-					prospective_parachains_mode,
 					true,
-					Default::default(),
+					&new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1),
 					&mut tx,
 				)
 				.await
@@ -1156,14 +987,11 @@ mod select_candidates {
 	}
 
 	#[test]
-	fn request_from_prospective_parachains_multiple_cores_per_para_elastic_scaling_mvp_disabled() {
+	fn multiple_cores_per_para_elastic_scaling_mvp_disabled() {
 		let mock_cores = mock_availability_cores_multiple_per_para();
 
 		// why those particular indices? see the comments on mock_availability_cores()
 		let expected_candidates: Vec<_> = vec![1, 4, 7, 8, 10];
-		// Expect prospective parachains subsystem requests.
-		let prospective_parachains_mode =
-			ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
 
 		let (candidates, expected_candidates) =
 			make_candidates(mock_cores.len(), expected_candidates);
@@ -1181,23 +1009,13 @@ mod select_candidates {
 		let mock_cores_clone = mock_cores.clone();
 		let expected_candidates_clone = expected_candidates.clone();
 		test_harness(
-			|r| {
-				mock_overseer(
-					r,
-					mock_cores_clone,
-					expected_candidates,
-					required_ancestors,
-					prospective_parachains_mode,
-				)
-			},
+			|r| mock_overseer(r, mock_cores_clone, expected_candidates, required_ancestors),
 			|mut tx: TestSubsystemSender| async move {
 				let result = select_candidates(
 					&mock_cores,
 					&[],
-					&[],
-					prospective_parachains_mode,
 					false,
-					Default::default(),
+					&new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1),
 					&mut tx,
 				)
 				.await
@@ -1235,9 +1053,6 @@ mod select_candidates {
 		// why those particular indices? see the comments on mock_availability_cores()
 		let expected_candidates: Vec<_> =
 			[1, 4, 7, 8, 10, 12].iter().map(|&idx| candidates[idx].clone()).collect();
-		// Expect prospective parachains subsystem requests.
-		let prospective_parachains_mode =
-			ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 };
 
 		let expected_backed = expected_candidates
 			.iter()
@@ -1256,23 +1071,13 @@ mod select_candidates {
 
 		let mock_cores_clone = mock_cores.clone();
 		test_harness(
-			|r| {
-				mock_overseer(
-					r,
-					mock_cores_clone,
-					expected_backed,
-					HashMap::new(),
-					prospective_parachains_mode,
-				)
-			},
+			|r| mock_overseer(r, mock_cores_clone, expected_backed, HashMap::new()),
 			|mut tx: TestSubsystemSender| async move {
 				let result = select_candidates(
 					&mock_cores,
 					&[],
-					&[],
-					prospective_parachains_mode,
 					false,
-					Default::default(),
+					&new_leaf(Default::default(), BLOCK_UNDER_PRODUCTION - 1),
 					&mut tx,
 				)
 				.await
diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs
index 8a885ea9cc92fc9948074ce5aeff85db496da1a0..4ed42626d88eeba2fdcfcc0f3960b4803361dbb9 100644
--- a/polkadot/node/core/runtime-api/src/cache.rs
+++ b/polkadot/node/core/runtime-api/src/cache.rs
@@ -76,6 +76,7 @@ pub(crate) struct RequestResultCache {
 	approval_voting_params: LruMap<SessionIndex, ApprovalVotingParams>,
 	claim_queue: LruMap<Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>>,
 	backing_constraints: LruMap<(Hash, ParaId), Option<Constraints>>,
+	scheduling_lookahead: LruMap<SessionIndex, u32>,
 }
 
 impl Default for RequestResultCache {
@@ -114,6 +115,7 @@ impl Default for RequestResultCache {
 			node_features: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			claim_queue: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 			backing_constraints: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
+			scheduling_lookahead: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)),
 		}
 	}
 }
@@ -576,10 +578,21 @@ impl RequestResultCache {
 	) {
 		self.backing_constraints.insert(key, value);
 	}
+
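+	/// Returns the cached scheduling lookahead for the given session, if any.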
+	pub(crate) fn scheduling_lookahead(&mut self, session_index: SessionIndex) -> Option<u32> {
+		self.scheduling_lookahead.get(&session_index).copied()
+	}
+
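+	/// Caches the scheduling lookahead for a session.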
+	pub(crate) fn cache_scheduling_lookahead(
+		&mut self,
+		session_index: SessionIndex,
+		scheduling_lookahead: u32,
+	) {
+		self.scheduling_lookahead.insert(session_index, scheduling_lookahead);
+	}
 }
 
 pub(crate) enum RequestResult {
-	// The structure of each variant is (relay_parent, [params,]*, result)
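+	// Each variant carries its lookup key (relay parent or session index), params and result.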
 	Authorities(Hash, Vec<AuthorityDiscoveryId>),
 	Validators(Hash, Vec<ValidatorId>),
 	MinimumBackingVotes(SessionIndex, u32),
@@ -628,4 +641,5 @@ pub(crate) enum RequestResult {
 	ClaimQueue(Hash, BTreeMap<CoreIndex, VecDeque<ParaId>>),
 	CandidatesPendingAvailability(Hash, ParaId, Vec<CommittedCandidateReceipt>),
 	BackingConstraints(Hash, ParaId, Option<Constraints>),
+	SchedulingLookahead(SessionIndex, u32),
 }
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index 4889822b46a9b3dc8de972f265b2febee6d7d762..2d864c8cf2f4c59355c35161f5ecab90a0dfeb30 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -168,7 +168,7 @@ where
 			KeyOwnershipProof(relay_parent, validator_id, key_ownership_proof) => self
 				.requests_cache
 				.cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof),
-			RequestResult::ApprovalVotingParams(_relay_parent, session_index, params) =>
+			ApprovalVotingParams(_relay_parent, session_index, params) =>
 				self.requests_cache.cache_approval_voting_params(session_index, params),
 			SubmitReportDisputeLost(_) => {},
 			DisabledValidators(relay_parent, disabled_validators) =>
@@ -186,6 +186,9 @@ where
 			BackingConstraints(relay_parent, para_id, constraints) => self
 				.requests_cache
 				.cache_backing_constraints((relay_parent, para_id), constraints),
+			SchedulingLookahead(session_index, scheduling_lookahead) => self
+				.requests_cache
+				.cache_scheduling_lookahead(session_index, scheduling_lookahead),
 		}
 	}
 
@@ -345,6 +348,15 @@ where
 				query!(claim_queue(), sender).map(|sender| Request::ClaimQueue(sender)),
 			Request::BackingConstraints(para, sender) => query!(backing_constraints(para), sender)
 				.map(|sender| Request::BackingConstraints(para, sender)),
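+			// Serve the scheduling lookahead from the per-session cache, or forward on a miss.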
+			Request::SchedulingLookahead(index, sender) => {
+				if let Some(value) = self.requests_cache.scheduling_lookahead(index) {
+					self.metrics.on_cached_request();
+					let _ = sender.send(Ok(value));
+					None
+				} else {
+					Some(Request::SchedulingLookahead(index, sender))
+				}
+			},
 		}
 	}
 
@@ -665,5 +677,12 @@ where
 				sender
 			)
 		},
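+		// Version-gated: runtimes predating the scheduling lookahead API are rejected.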
+		Request::SchedulingLookahead(index, sender) => query!(
+			SchedulingLookahead,
+			scheduling_lookahead(),
+			ver = Request::SCHEDULING_LOOKAHEAD_RUNTIME_REQUIREMENT,
+			sender,
+			result = (index)
+		),
 	}
 }
diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs
index 56c6087695786bc55bd400a817d449d6fdc7a559..bbc580129002271ad718e75d1b31874ddc839064 100644
--- a/polkadot/node/core/runtime-api/src/tests.rs
+++ b/polkadot/node/core/runtime-api/src/tests.rs
@@ -308,6 +308,10 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient {
 		todo!("Not required for tests")
 	}
 
+	async fn scheduling_lookahead(&self, _: Hash) -> Result<u32, ApiError> {
+		todo!("Not required for tests")
+	}
+
 	async fn backing_constraints(
 		&self,
 		_at: Hash,
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs
index 6a570331f710b3a845d5ebcd02a007e45eb3f662..aa2fc52e90c522e52f31683a184a71e5557d8f1c 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs
@@ -21,9 +21,7 @@ use std::collections::{HashSet, VecDeque};
 use futures::{future::BoxFuture, stream::FuturesUnordered};
 
 use polkadot_node_network_protocol::{
-	request_response::{
-		incoming::OutgoingResponse, v1 as protocol_v1, v2 as protocol_v2, IncomingRequest,
-	},
+	request_response::{incoming::OutgoingResponse, v2 as protocol_v2, IncomingRequest},
 	PeerId,
 };
 use polkadot_node_primitives::PoV;
@@ -90,16 +88,9 @@ pub struct WaitingCollationFetches {
 
 /// Backwards-compatible wrapper for incoming collations requests.
 pub enum VersionedCollationRequest {
-	V1(IncomingRequest<protocol_v1::CollationFetchingRequest>),
 	V2(IncomingRequest<protocol_v2::CollationFetchingRequest>),
 }
 
-impl From<IncomingRequest<protocol_v1::CollationFetchingRequest>> for VersionedCollationRequest {
-	fn from(req: IncomingRequest<protocol_v1::CollationFetchingRequest>) -> Self {
-		Self::V1(req)
-	}
-}
-
 impl From<IncomingRequest<protocol_v2::CollationFetchingRequest>> for VersionedCollationRequest {
 	fn from(req: IncomingRequest<protocol_v2::CollationFetchingRequest>) -> Self {
 		Self::V2(req)
@@ -110,15 +101,20 @@ impl VersionedCollationRequest {
 	/// Returns parachain id from the request payload.
 	pub fn para_id(&self) -> ParaId {
 		match self {
-			VersionedCollationRequest::V1(req) => req.payload.para_id,
 			VersionedCollationRequest::V2(req) => req.payload.para_id,
 		}
 	}
 
+	/// Returns candidate hash from the request payload.
+	pub fn candidate_hash(&self) -> CandidateHash {
+		match self {
+			VersionedCollationRequest::V2(req) => req.payload.candidate_hash,
+		}
+	}
+
 	/// Returns relay parent from the request payload.
 	pub fn relay_parent(&self) -> Hash {
 		match self {
-			VersionedCollationRequest::V1(req) => req.payload.relay_parent,
 			VersionedCollationRequest::V2(req) => req.payload.relay_parent,
 		}
 	}
@@ -126,7 +122,6 @@ impl VersionedCollationRequest {
 	/// Returns id of the peer the request was received from.
 	pub fn peer_id(&self) -> PeerId {
 		match self {
-			VersionedCollationRequest::V1(req) => req.peer,
 			VersionedCollationRequest::V2(req) => req.peer,
 		}
 	}
@@ -134,10 +129,9 @@ impl VersionedCollationRequest {
 	/// Sends the response back to requester.
 	pub fn send_outgoing_response(
 		self,
-		response: OutgoingResponse<protocol_v1::CollationFetchingResponse>,
+		response: OutgoingResponse<protocol_v2::CollationFetchingResponse>,
 	) -> Result<(), ()> {
 		match self {
-			VersionedCollationRequest::V1(req) => req.send_outgoing_response(response),
 			VersionedCollationRequest::V2(req) => req.send_outgoing_response(response),
 		}
 	}
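With the v1 variant gone, `VersionedCollationRequest` has a single arm, so every accessor reduces to one match branch; presumably the enum wrapper is kept so a future protocol version can slot in without churn at the call sites. A simplified standalone illustration:

```rust
// Simplified stand-in mirroring the single-variant `VersionedCollationRequest`.
enum VersionedRequest {
	V2 { candidate_hash: u64 },
}

impl VersionedRequest {
	// With only one variant left, each accessor is a single match arm.
	fn candidate_hash(&self) -> u64 {
		match self {
			VersionedRequest::V2 { candidate_hash } => *candidate_hash,
		}
	}
}
```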
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index d77480272cb4f80f7190a3f8362242037baefbcb..50c933599f5497a6e92862d5a69c17eccceb01f7 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -31,7 +31,7 @@ use polkadot_node_network_protocol::{
 	peer_set::{CollationVersion, PeerSet},
 	request_response::{
 		incoming::{self, OutgoingResponse},
-		v1 as request_v1, v2 as request_v2, IncomingRequestReceiver,
+		v2 as request_v2, IncomingRequestReceiver,
 	},
 	v1 as protocol_v1, v2 as protocol_v2, OurView, PeerId, UnifiedReputationChange as Rep,
 	Versioned, View,
@@ -40,23 +40,18 @@ use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement};
 use polkadot_node_subsystem::{
 	messages::{
 		CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeTxMessage, ParentHeadData,
-		RuntimeApiMessage,
 	},
 	overseer, FromOrchestra, OverseerSignal,
 };
 use polkadot_node_subsystem_util::{
 	backing_implicit_view::View as ImplicitView,
 	reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL},
-	runtime::{
-		fetch_claim_queue, get_availability_cores, get_group_rotation_info,
-		prospective_parachains_mode, ProspectiveParachainsMode, RuntimeInfo,
-	},
+	runtime::{fetch_claim_queue, get_group_rotation_info, ClaimQueueSnapshot, RuntimeInfo},
 	TimeoutExt,
 };
 use polkadot_primitives::{
-	vstaging::{CandidateReceiptV2 as CandidateReceipt, CoreState},
-	AuthorityDiscoveryId, CandidateHash, CollatorPair, CoreIndex, GroupIndex, Hash, HeadData,
-	Id as ParaId, SessionIndex,
+	vstaging::CandidateReceiptV2 as CandidateReceipt, AuthorityDiscoveryId, CandidateHash,
+	CollatorPair, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, SessionIndex,
 };
 
 use super::LOG_TARGET;
@@ -199,8 +194,6 @@ impl ValidatorGroup {
 struct PeerData {
 	/// Peer's view.
 	view: View,
-	/// Network protocol version.
-	version: CollationVersion,
 	/// Unknown heads in the view.
 	///
 	/// This can happen when the validator is faster at importing a block and sending out its
@@ -229,21 +222,27 @@ impl CollationWithCoreIndex {
 }
 
 struct PerRelayParent {
-	prospective_parachains_mode: ProspectiveParachainsMode,
 	/// Per core index validators group responsible for backing candidates built
 	/// on top of this relay parent.
 	validator_group: HashMap<CoreIndex, ValidatorGroup>,
 	/// Distributed collations.
 	collations: HashMap<CandidateHash, CollationWithCoreIndex>,
+	/// Number of claim queue assignments per core for our para.
+	assignments: HashMap<CoreIndex, usize>,
 }
 
 impl PerRelayParent {
-	fn new(mode: ProspectiveParachainsMode) -> Self {
-		Self {
-			prospective_parachains_mode: mode,
-			validator_group: HashMap::default(),
-			collations: HashMap::new(),
-		}
+	fn new(para_id: ParaId, claim_queue: ClaimQueueSnapshot) -> Self {
+		let assignments =
+			claim_queue.iter_all_claims().fold(HashMap::new(), |mut acc, (core, claims)| {
+				let n_claims = claims.iter().filter(|para| para == &&para_id).count();
+				if n_claims > 0 {
+					acc.insert(*core, n_claims);
+				}
+				acc
+			});
+
+		Self { validator_group: HashMap::default(), collations: HashMap::new(), assignments }
 	}
 }
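To make the fold in `PerRelayParent::new` concrete: it walks every core's claims and keeps, for each core with at least one claim for our para, the number of such claims. A standalone sketch over a plain `BTreeMap` claim queue (toy scalar types; the real snapshot yields `(CoreIndex, &VecDeque<ParaId>)` pairs):

```rust
use std::collections::{BTreeMap, HashMap, VecDeque};

type CoreIndex = u32;
type ParaId = u32;

/// Count how many claims each core holds for `para_id`, keeping only cores
/// with at least one claim, mirroring `PerRelayParent::new`.
fn assignments(
	claim_queue: &BTreeMap<CoreIndex, VecDeque<ParaId>>,
	para_id: ParaId,
) -> HashMap<CoreIndex, usize> {
	claim_queue.iter().fold(HashMap::new(), |mut acc, (core, claims)| {
		let n_claims = claims.iter().filter(|para| **para == para_id).count();
		if n_claims > 0 {
			acc.insert(*core, n_claims);
		}
		acc
	})
}

#[test]
fn counts_only_our_para() {
	let mut cq = BTreeMap::new();
	cq.insert(0, VecDeque::from(vec![7, 7, 8]));
	cq.insert(1, VecDeque::from(vec![8]));
	let a = assignments(&cq, 7);
	assert_eq!(a.get(&0), Some(&2));
	assert_eq!(a.get(&1), None);
}
```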
 
@@ -262,24 +261,11 @@ struct State {
 	/// to determine what is relevant to them.
 	peer_data: HashMap<PeerId, PeerData>,
 
-	/// Leaves that do support asynchronous backing along with
-	/// implicit ancestry. Leaves from the implicit view are present in
-	/// `active_leaves`, the opposite doesn't hold true.
-	///
-	/// Relay-chain blocks which don't support prospective parachains are
-	/// never included in the fragment chains of active leaves which do. In
-	/// particular, this means that if a given relay parent belongs to implicit
-	/// ancestry of some active leaf, then it does support prospective parachains.
+	/// Leaves along with implicit ancestry.
 	///
 	/// It's `None` if the collator is not yet collating for a paraid.
 	implicit_view: Option<ImplicitView>,
 
-	/// All active leaves observed by us, including both that do and do not
-	/// support prospective parachains. This mapping works as a replacement for
-	/// [`polkadot_node_network_protocol::View`] and can be dropped once the transition
-	/// to asynchronous backing is done.
-	active_leaves: HashMap<Hash, ProspectiveParachainsMode>,
-
 	/// Validators and distributed collations tracked for each relay parent from
 	/// our view, including both leaves and implicit ancestry.
 	per_relay_parent: HashMap<Hash, PerRelayParent>,
@@ -340,7 +326,6 @@ impl State {
 			collating_on: Default::default(),
 			peer_data: Default::default(),
 			implicit_view: None,
-			active_leaves: Default::default(),
 			per_relay_parent: Default::default(),
 			collation_result_senders: Default::default(),
 			peer_ids: Default::default(),
@@ -359,7 +344,7 @@ impl State {
 /// Figure out the core our para is assigned to and the relevant validators.
 /// Issue a connection request to these validators.
 /// If the para is not scheduled or next up on any core, at the relay-parent,
-/// or the relay-parent isn't in the active-leaves set, we ignore the message
+/// or the relay-parent isn't in the implicit ancestry, we ignore the message
 /// as it must be invalid in that case - although this indicates a logic error
 /// elsewhere in the node.
 #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
@@ -391,20 +376,32 @@ async fn distribute_collation<Context>(
 			return Ok(())
 		},
 	};
-	let relay_parent_mode = per_relay_parent.prospective_parachains_mode;
 
-	let collations_limit = match relay_parent_mode {
-		ProspectiveParachainsMode::Disabled => 1,
-		ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth + 1,
+	let Some(collations_limit) = per_relay_parent.assignments.get(&core_index) else {
+		gum::warn!(
+			target: LOG_TARGET,
+			para_id = %id,
+			relay_parent = ?candidate_relay_parent,
+			cores = ?per_relay_parent.assignments.keys(),
+			?core_index,
+			"Attempting to distribute collation for a core we are not assigned to ",
+		);
+
+		return Ok(())
 	};
 
-	if per_relay_parent.collations.len() >= collations_limit {
+	let current_collations_count = per_relay_parent
+		.collations
+		.values()
+		.filter(|c| c.core_index() == &core_index)
+		.count();
+	if current_collations_count >= *collations_limit {
 		gum::debug!(
 			target: LOG_TARGET,
 			?candidate_relay_parent,
-			?relay_parent_mode,
-			"The limit of {} collations per relay parent is already reached",
+			"The limit of {} collations per relay parent for core {} is already reached",
 			collations_limit,
+			core_index.0,
 		);
 		return Ok(())
 	}
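The check above replaces the old global per-relay-parent limit with a per-core one: a core may hold at most as many collations as it has claims for our para in the claim queue. A minimal sketch of the same gate (simplified types):

```rust
use std::collections::HashMap;

type CoreIndex = u32;
type CandidateHash = u64;

/// Whether another collation may be distributed on `core`, given the
/// per-core claim counts and the collations already tracked.
fn may_distribute(
	assignments: &HashMap<CoreIndex, usize>,
	collations: &HashMap<CandidateHash, CoreIndex>,
	core: CoreIndex,
) -> bool {
	let Some(limit) = assignments.get(&core) else {
		// We are not assigned to this core at all.
		return false;
	};
	let current = collations.values().filter(|c| **c == core).count();
	current < *limit
}
```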
@@ -420,54 +417,21 @@ async fn distribute_collation<Context>(
 		return Ok(())
 	}
 
-	// Determine which core(s) the para collated-on is assigned to.
-	// If it is not scheduled then ignore the message.
-	let (our_cores, num_cores) =
-		match determine_cores(ctx.sender(), id, candidate_relay_parent, relay_parent_mode).await? {
-			(cores, _num_cores) if cores.is_empty() => {
-				gum::warn!(
-					target: LOG_TARGET,
-					para_id = %id,
-					"looks like no core is assigned to {} at {}", id, candidate_relay_parent,
-				);
-
-				return Ok(())
-			},
-			(cores, num_cores) => (cores, num_cores),
-		};
-
-	let elastic_scaling = our_cores.len() > 1;
+	let elastic_scaling = per_relay_parent.assignments.len() > 1;
 	if elastic_scaling {
 		gum::debug!(
 			target: LOG_TARGET,
 			para_id = %id,
-			cores = ?our_cores,
-			"{} is assigned to {} cores at {}", id, our_cores.len(), candidate_relay_parent,
+			cores = ?per_relay_parent.assignments.keys(),
+			"{} is assigned to {} cores at {}", id, per_relay_parent.assignments.len(), candidate_relay_parent,
 		);
 	}
 
-	// Double check that the specified `core_index` is among the ones our para has assignments for.
-	if !our_cores.iter().any(|assigned_core| assigned_core == &core_index) {
-		gum::warn!(
-			target: LOG_TARGET,
-			para_id = %id,
-			relay_parent = ?candidate_relay_parent,
-			cores = ?our_cores,
-			?core_index,
-			"Attempting to distribute collation for a core we are not assigned to ",
-		);
-
-		return Ok(())
-	}
-
 	let our_core = core_index;
 
 	// Determine the group on that core.
-	//
-	// When prospective parachains are disabled, candidate relay parent here is
-	// guaranteed to be an active leaf.
 	let GroupValidators { validators, session_index, group_index } =
-		determine_our_validators(ctx, runtime, our_core, num_cores, candidate_relay_parent).await?;
+		determine_our_validators(ctx, runtime, our_core, candidate_relay_parent).await?;
 
 	if validators.is_empty() {
 		gum::warn!(
@@ -495,7 +459,6 @@ async fn distribute_collation<Context>(
 		target: LOG_TARGET,
 		para_id = %id,
 		candidate_relay_parent = %candidate_relay_parent,
-		relay_parent_mode = ?relay_parent_mode,
 		?candidate_hash,
 		pov_hash = ?pov.hash(),
 		core = ?our_core,
@@ -531,35 +494,32 @@ async fn distribute_collation<Context>(
 		),
 	);
 
-	// If prospective parachains are disabled, a leaf should be known to peer.
-	// Otherwise, it should be present in allowed ancestry of some leaf.
+	// The candidate relay parent should be present in the allowed ancestry of some leaf.
 	//
 	// It's collation-producer responsibility to verify that there exists
 	// a hypothetical membership in a fragment chain for the candidate.
-	let interested =
-		state
-			.peer_data
-			.iter()
-			.filter(|(_, PeerData { view: v, .. })| match relay_parent_mode {
-				ProspectiveParachainsMode::Disabled => v.contains(&candidate_relay_parent),
-				ProspectiveParachainsMode::Enabled { .. } => v.iter().any(|block_hash| {
-					state.implicit_view.as_ref().map(|implicit_view| {
-						implicit_view
-							.known_allowed_relay_parents_under(block_hash, Some(id))
-							.unwrap_or_default()
-							.contains(&candidate_relay_parent)
-					}) == Some(true)
-				}),
-			});
+	let interested = state
+		.peer_data
+		.iter()
+		.filter(|(_, PeerData { view: v, .. })| {
+			v.iter().any(|block_hash| {
+				state.implicit_view.as_ref().map(|implicit_view| {
+					implicit_view
+						.known_allowed_relay_parents_under(block_hash, Some(id))
+						.unwrap_or_default()
+						.contains(&candidate_relay_parent)
+				}) == Some(true)
+			})
+		})
+		.map(|(id, _)| id);
 
 	// Make sure already connected peers get collations:
-	for (peer_id, peer_data) in interested {
+	for peer_id in interested {
 		advertise_collation(
 			ctx,
 			candidate_relay_parent,
 			per_relay_parent,
 			peer_id,
-			peer_data.version,
 			&state.peer_ids,
 			&mut state.advertisement_timeouts,
 			&state.metrics,
@@ -570,45 +530,6 @@ async fn distribute_collation<Context>(
 	Ok(())
 }
 
-/// Get the core indices that are assigned to the para being collated on if any
-/// and the total number of cores.
-async fn determine_cores(
-	sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
-	para_id: ParaId,
-	relay_parent: Hash,
-	relay_parent_mode: ProspectiveParachainsMode,
-) -> Result<(Vec<CoreIndex>, usize)> {
-	let cores = get_availability_cores(sender, relay_parent).await?;
-	let n_cores = cores.len();
-	let mut assigned_cores = Vec::new();
-	let maybe_claim_queue = fetch_claim_queue(sender, relay_parent).await?;
-
-	for (idx, core) in cores.iter().enumerate() {
-		let core_is_scheduled = match maybe_claim_queue {
-			Some(ref claim_queue) => {
-				// Runtime supports claim queue - use it.
-				claim_queue
-					.iter_claims_for_core(&CoreIndex(idx as u32))
-					.any(|para| para == &para_id)
-			},
-			None => match core {
-				CoreState::Scheduled(scheduled) if scheduled.para_id == para_id => true,
-				CoreState::Occupied(occupied) if relay_parent_mode.is_enabled() =>
-				// With async backing we don't care about the core state,
-				// it is only needed for figuring our validators group.
-					occupied.next_up_on_available.as_ref().map(|c| c.para_id) == Some(para_id),
-				_ => false,
-			},
-		};
-
-		if core_is_scheduled {
-			assigned_cores.push(CoreIndex::from(idx as u32));
-		}
-	}
-
-	Ok((assigned_cores, n_cores))
-}
-
 /// Validators of a particular group index.
 #[derive(Debug)]
 struct GroupValidators {
@@ -627,7 +548,6 @@ async fn determine_our_validators<Context>(
 	ctx: &mut Context,
 	runtime: &mut RuntimeInfo,
 	core_index: CoreIndex,
-	cores: usize,
 	relay_parent: Hash,
 ) -> Result<GroupValidators> {
 	let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await?;
@@ -637,9 +557,10 @@ async fn determine_our_validators<Context>(
 		.session_info;
 	gum::debug!(target: LOG_TARGET, ?session_index, "Received session info");
 	let groups = &info.validator_groups;
+	let num_cores = groups.len();
 	let rotation_info = get_group_rotation_info(ctx.sender(), relay_parent).await?;
 
-	let current_group_index = rotation_info.group_for_core(core_index, cores);
+	let current_group_index = rotation_info.group_for_core(core_index, num_cores);
 	let current_validators =
 		groups.get(current_group_index).map(|v| v.as_slice()).unwrap_or_default();
 
@@ -657,46 +578,24 @@ async fn determine_our_validators<Context>(
 	Ok(current_validators)
 }
 
-/// Construct the declare message to be sent to validator depending on its
-/// network protocol version.
+/// Construct the declare message to be sent to a validator.
 fn declare_message(
 	state: &mut State,
-	version: CollationVersion,
 ) -> Option<Versioned<protocol_v1::CollationProtocol, protocol_v2::CollationProtocol>> {
 	let para_id = state.collating_on?;
-	Some(match version {
-		CollationVersion::V1 => {
-			let declare_signature_payload =
-				protocol_v1::declare_signature_payload(&state.local_peer_id);
-			let wire_message = protocol_v1::CollatorProtocolMessage::Declare(
-				state.collator_pair.public(),
-				para_id,
-				state.collator_pair.sign(&declare_signature_payload),
-			);
-			Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message))
-		},
-		CollationVersion::V2 => {
-			let declare_signature_payload =
-				protocol_v2::declare_signature_payload(&state.local_peer_id);
-			let wire_message = protocol_v2::CollatorProtocolMessage::Declare(
-				state.collator_pair.public(),
-				para_id,
-				state.collator_pair.sign(&declare_signature_payload),
-			);
-			Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message))
-		},
-	})
+	let declare_signature_payload = protocol_v2::declare_signature_payload(&state.local_peer_id);
+	let wire_message = protocol_v2::CollatorProtocolMessage::Declare(
+		state.collator_pair.public(),
+		para_id,
+		state.collator_pair.sign(&declare_signature_payload),
+	);
+	Some(Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message)))
 }
 
 /// Issue versioned `Declare` collation message to the given `peer`.
 #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
-async fn declare<Context>(
-	ctx: &mut Context,
-	state: &mut State,
-	peer: &PeerId,
-	version: CollationVersion,
-) {
-	if let Some(wire_message) = declare_message(state, version) {
+async fn declare<Context>(ctx: &mut Context, state: &mut State, peer: &PeerId) {
+	if let Some(wire_message) = declare_message(state) {
 		ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage(vec![*peer], wire_message))
 			.await;
 	}
@@ -735,7 +634,6 @@ async fn advertise_collation<Context>(
 	relay_parent: Hash,
 	per_relay_parent: &mut PerRelayParent,
 	peer: &PeerId,
-	protocol_version: CollationVersion,
 	peer_ids: &HashMap<PeerId, HashSet<AuthorityDiscoveryId>>,
 	advertisement_timeouts: &mut FuturesUnordered<ResetInterestTimeout>,
 	metrics: &Metrics,
@@ -744,19 +642,6 @@ async fn advertise_collation<Context>(
 		let core_index = *collation_and_core.core_index();
 		let collation = collation_and_core.collation_mut();
 
-		// Check that peer will be able to request the collation.
-		if let CollationVersion::V1 = protocol_version {
-			if per_relay_parent.prospective_parachains_mode.is_enabled() {
-				gum::trace!(
-					target: LOG_TARGET,
-					?relay_parent,
-					peer_id = %peer,
-					"Skipping advertising to validator, incorrect network protocol version",
-				);
-				return
-			}
-		}
-
 		let Some(validator_group) = per_relay_parent.validator_group.get_mut(&core_index) else {
 			gum::debug!(
 				target: LOG_TARGET,
@@ -793,25 +678,15 @@ async fn advertise_collation<Context>(
 
 		collation.status.advance_to_advertised();
 
-		let collation_message = match protocol_version {
-			CollationVersion::V2 => {
-				let wire_message = protocol_v2::CollatorProtocolMessage::AdvertiseCollation {
+		ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage(
+			vec![*peer],
+			Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(
+				protocol_v2::CollatorProtocolMessage::AdvertiseCollation {
 					relay_parent,
 					candidate_hash: *candidate_hash,
 					parent_head_data_hash: collation.parent_head_data.hash(),
-				};
-				Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(wire_message))
-			},
-			CollationVersion::V1 => {
-				let wire_message =
-					protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent);
-				Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message))
-			},
-		};
-
-		ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage(
-			vec![*peer],
-			collation_message,
+				},
+			)),
 		))
 		.await;
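Advertisements are now always v2, and the payload pins down a concrete candidate rather than just a relay parent; this is what lets a validator fetch a specific collation when several share the same relay parent under elastic scaling. A simplified shape of the message (the real fields use `Hash` and `CandidateHash` types):

```rust
// Simplified shape of the v2 advertisement. v1 only named the relay parent,
// so it could not disambiguate between multiple candidates built on it.
struct AdvertiseCollation {
	relay_parent: [u8; 32],
	candidate_hash: [u8; 32],
	parent_head_data_hash: [u8; 32],
}
```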
 
@@ -933,7 +808,7 @@ async fn send_collation(
 				parent_head_data: head_data,
 			}),
 		ParentHeadData::OnlyHash(_) =>
-			Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)),
+			Ok(request_v2::CollationFetchingResponse::Collation(receipt, pov)),
 	};
 
 	let response =
@@ -998,7 +873,16 @@ async fn handle_incoming_peer_message<Context>(
 			ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation))
 				.await;
 		},
-		Versioned::V1(V1::CollationSeconded(relay_parent, statement)) |
+		Versioned::V1(V1::CollationSeconded(relay_parent, statement)) => {
+			// Should not happen: we no longer accept connections on v1.
+			gum::warn!(
+				target: LOG_TARGET,
+				?statement,
+				?origin,
+				?relay_parent,
+				"Collation seconded message received on unsupported protocol version 1",
+			);
+		},
 		Versioned::V2(V2::CollationSeconded(relay_parent, statement)) |
 		Versioned::V3(V2::CollationSeconded(relay_parent, statement)) => {
 			if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) {
@@ -1092,24 +976,10 @@ async fn handle_incoming_request<Context>(
 					return Ok(())
 				},
 			};
-			let mode = per_relay_parent.prospective_parachains_mode;
 
 			let collation_with_core = match &req {
-				VersionedCollationRequest::V1(_) if !mode.is_enabled() =>
-					per_relay_parent.collations.values_mut().next(),
 				VersionedCollationRequest::V2(req) =>
 					per_relay_parent.collations.get_mut(&req.payload.candidate_hash),
-				_ => {
-					gum::warn!(
-						target: LOG_TARGET,
-						relay_parent = %relay_parent,
-						prospective_parachains_mode = ?mode,
-						?peer_id,
-						"Collation request version is invalid",
-					);
-
-					return Ok(())
-				},
 			};
 			let (receipt, pov, parent_head_data) =
 				if let Some(collation_with_core) = collation_with_core {
@@ -1187,9 +1057,7 @@ async fn handle_peer_view_change<Context>(
 	peer_id: PeerId,
 	view: View,
 ) {
-	let Some(PeerData { view: current, version, unknown_heads }) =
-		state.peer_data.get_mut(&peer_id)
-	else {
+	let Some(PeerData { view: current, unknown_heads }) = state.peer_data.get_mut(&peer_id) else {
 		return
 	};
 
@@ -1198,20 +1066,15 @@ async fn handle_peer_view_change<Context>(
 	*current = view;
 
 	for added in added.into_iter() {
-		let block_hashes = match state
-			.per_relay_parent
-			.get(&added)
-			.map(|per_relay_parent| per_relay_parent.prospective_parachains_mode)
-		{
-			Some(ProspectiveParachainsMode::Disabled) => std::slice::from_ref(&added),
-			Some(ProspectiveParachainsMode::Enabled { .. }) => state
+		let block_hashes = match state.per_relay_parent.contains_key(&added) {
+			true => state
 				.implicit_view
 				.as_ref()
 				.and_then(|implicit_view| {
 					implicit_view.known_allowed_relay_parents_under(&added, state.collating_on)
 				})
 				.unwrap_or_default(),
-			None => {
+			false => {
 				gum::trace!(
 					target: LOG_TARGET,
 					?peer_id,
@@ -1235,7 +1098,6 @@ async fn handle_peer_view_change<Context>(
 				*block_hash,
 				per_relay_parent,
 				&peer_id,
-				*version,
 				&state.peer_ids,
 				&mut state.advertisement_timeouts,
 				&state.metrics,
@@ -1261,7 +1123,7 @@ async fn handle_network_msg<Context>(
 			// it should be handled here.
 			gum::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, ?maybe_authority, "Peer connected");
 
-			let version = match protocol_version.try_into() {
+			let version: CollationVersion = match protocol_version.try_into() {
 				Ok(version) => version,
 				Err(err) => {
 					// Network bridge is expected to handle this.
@@ -1275,9 +1137,25 @@ async fn handle_network_msg<Context>(
 					return Ok(())
 				},
 			};
+			if version == CollationVersion::V1 {
+				gum::warn!(
+					target: LOG_TARGET,
+					?peer_id,
+					?observed_role,
+					"Unsupported protocol version v1"
+				);
+
+				// V1 is no longer supported, so disconnect the peer.
+				ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(
+					peer_id,
+					PeerSet::Collation,
+				))
+				.await;
+				return Ok(())
+			}
+
 			state.peer_data.entry(peer_id).or_insert_with(|| PeerData {
 				view: View::default(),
-				version,
 				// Unlikely that the collator is falling 10 blocks behind and if so, it probably is
 				// not able to keep up any way.
 				unknown_heads: LruMap::new(ByLength::new(10)),
@@ -1292,7 +1170,7 @@ async fn handle_network_msg<Context>(
 				);
 				state.peer_ids.insert(peer_id, authority_ids);
 
-				declare(ctx, state, &peer_id, version).await;
+				declare(ctx, state, &peer_id).await;
 			}
 		},
 		PeerViewChange(peer_id, view) => {
@@ -1313,9 +1191,9 @@ async fn handle_network_msg<Context>(
 		},
 		UpdatedAuthorityIds(peer_id, authority_ids) => {
 			gum::trace!(target: LOG_TARGET, ?peer_id, ?authority_ids, "Updated authority ids");
-			if let Some(version) = state.peer_data.get(&peer_id).map(|d| d.version) {
+			if state.peer_data.contains_key(&peer_id) {
 				if state.peer_ids.insert(peer_id, authority_ids).is_none() {
-					declare(ctx, state, &peer_id, version).await;
+					declare(ctx, state, &peer_id).await;
 				}
 			}
 		},
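The `Declare` above is only sent when `peer_ids.insert` reports a first-time insertion. A tiny standalone illustration of that idiom:

```rust
use std::collections::HashMap;

/// `HashMap::insert` returns the previous value, so `is_none()` signals a
/// first-time insertion, which is the trigger for sending `Declare` above.
fn newly_inserted(map: &mut HashMap<u32, String>, key: u32, value: String) -> bool {
	map.insert(key, value).is_none()
}
```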
@@ -1334,77 +1212,66 @@ async fn handle_our_view_change<Context>(
 	state: &mut State,
 	view: OurView,
 ) -> Result<()> {
-	let current_leaves = state.active_leaves.clone();
+	let Some(implicit_view) = &mut state.implicit_view else { return Ok(()) };
+	let Some(para_id) = state.collating_on else { return Ok(()) };
 
-	let removed = current_leaves.iter().filter(|(h, _)| !view.contains(h));
-	let added = view.iter().filter(|h| !current_leaves.contains_key(h));
+	let removed: Vec<_> =
+		implicit_view.leaves().copied().filter(|h| !view.contains(h)).collect();
+	let added: Vec<_> = view.iter().filter(|h| !implicit_view.contains_leaf(h)).collect();
 
 	for leaf in added {
-		let mode = prospective_parachains_mode(ctx.sender(), *leaf).await?;
-
-		state.active_leaves.insert(*leaf, mode);
-		state.per_relay_parent.insert(*leaf, PerRelayParent::new(mode));
-
-		if mode.is_enabled() {
-			if let Some(ref mut implicit_view) = state.implicit_view {
-				implicit_view
-					.activate_leaf(ctx.sender(), *leaf)
-					.await
-					.map_err(Error::ImplicitViewFetchError)?;
-
-				let allowed_ancestry = implicit_view
-					.known_allowed_relay_parents_under(leaf, state.collating_on)
-					.unwrap_or_default();
-
-				// Get the peers that already reported us this head, but we didn't knew it at this
-				// point.
-				let peers = state
-					.peer_data
-					.iter_mut()
-					.filter_map(|(id, data)| {
-						data.unknown_heads.remove(leaf).map(|_| (id, data.version))
-					})
-					.collect::<Vec<_>>();
-
-				for block_hash in allowed_ancestry {
-					let per_relay_parent = state
-						.per_relay_parent
-						.entry(*block_hash)
-						.or_insert_with(|| PerRelayParent::new(mode));
-
-					// Announce relevant collations to these peers.
-					for (peer_id, peer_version) in &peers {
-						advertise_collation(
-							ctx,
-							*block_hash,
-							per_relay_parent,
-							&peer_id,
-							*peer_version,
-							&state.peer_ids,
-							&mut state.advertisement_timeouts,
-							&state.metrics,
-						)
-						.await;
-					}
-				}
+		let claim_queue = fetch_claim_queue(ctx.sender(), *leaf).await?;
+		state.per_relay_parent.insert(*leaf, PerRelayParent::new(para_id, claim_queue));
+
+		implicit_view
+			.activate_leaf(ctx.sender(), *leaf)
+			.await
+			.map_err(Error::ImplicitViewFetchError)?;
+
+		let allowed_ancestry = implicit_view
+			.known_allowed_relay_parents_under(leaf, state.collating_on)
+			.unwrap_or_default();
+
+		// Get the peers that already reported this head to us before we knew
+		// about it.
+		let peers = state
+			.peer_data
+			.iter_mut()
+			.filter_map(|(id, data)| data.unknown_heads.remove(leaf).map(|_| id))
+			.collect::<Vec<_>>();
+
+		for block_hash in allowed_ancestry {
+			if !state.per_relay_parent.contains_key(block_hash) {
+				let claim_queue = fetch_claim_queue(ctx.sender(), *block_hash).await?;
+				state
+					.per_relay_parent
+					.insert(*block_hash, PerRelayParent::new(para_id, claim_queue));
+			}
+
+			let per_relay_parent =
+				state.per_relay_parent.get_mut(block_hash).expect("Just inserted");
+
+			// Announce relevant collations to these peers.
+			for peer_id in &peers {
+				advertise_collation(
+					ctx,
+					*block_hash,
+					per_relay_parent,
+					&peer_id,
+					&state.peer_ids,
+					&mut state.advertisement_timeouts,
+					&state.metrics,
+				)
+				.await;
 			}
 		}
 	}
 
-	for (leaf, mode) in removed {
-		state.active_leaves.remove(leaf);
+	for leaf in removed {
 		// If the leaf is deactivated it still may stay in the view as a part
 		// of implicit ancestry. Only update the state after the hash is actually
 		// pruned from the block info storage.
-		let pruned = if mode.is_enabled() {
-			state
-				.implicit_view
-				.as_mut()
-				.map(|view| view.deactivate_leaf(*leaf))
-				.unwrap_or_default()
-		} else {
-			vec![*leaf]
-		};
+		let pruned = implicit_view.deactivate_leaf(leaf);
 
 		for removed in &pruned {
 			gum::debug!(target: LOG_TARGET, relay_parent = ?removed, "Removing relay parent because our view changed.");
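The ancestry loop above uses a check-then-insert-then-`get_mut` sequence rather than the `Entry` API because the initializer (`fetch_claim_queue`) is async and `or_insert_with` cannot await. A minimal standalone sketch of the underlying pattern (helper name is illustrative):

```rust
use std::collections::HashMap;
use std::future::Future;
use std::hash::Hash;

/// Async-friendly get-or-insert: run the async initializer as a separate
/// step, since `Entry::or_insert_with` cannot `.await`.
async fn get_or_insert_async<K, V, F>(map: &mut HashMap<K, V>, key: K, init: F) -> &mut V
where
	K: Hash + Eq + Copy,
	F: Future<Output = V>,
{
	if !map.contains_key(&key) {
		let value = init.await;
		map.insert(key, value);
	}
	map.get_mut(&key).expect("just inserted; qed")
}
```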
@@ -1454,7 +1321,6 @@ pub(crate) async fn run<Context>(
 	ctx: Context,
 	local_peer_id: PeerId,
 	collator_pair: CollatorPair,
-	req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
 	req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>,
 	metrics: Metrics,
 ) -> std::result::Result<(), FatalError> {
@@ -1462,7 +1328,6 @@ pub(crate) async fn run<Context>(
 		ctx,
 		local_peer_id,
 		collator_pair,
-		req_v1_receiver,
 		req_v2_receiver,
 		metrics,
 		ReputationAggregator::default(),
@@ -1476,7 +1341,6 @@ async fn run_inner<Context>(
 	mut ctx: Context,
 	local_peer_id: PeerId,
 	collator_pair: CollatorPair,
-	mut req_v1_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
 	mut req_v2_receiver: IncomingRequestReceiver<request_v2::CollationFetchingRequest>,
 	metrics: Metrics,
 	reputation: ReputationAggregator,
@@ -1492,9 +1356,7 @@ async fn run_inner<Context>(
 
 	loop {
 		let reputation_changes = || vec![COST_INVALID_REQUEST];
-		let recv_req_v1 = req_v1_receiver.recv(reputation_changes).fuse();
 		let recv_req_v2 = req_v2_receiver.recv(reputation_changes).fuse();
-		pin_mut!(recv_req_v1);
 		pin_mut!(recv_req_v2);
 
 		let mut reconnect_timeout = &mut state.reconnect_timeout;
@@ -1558,18 +1420,7 @@ async fn run_inner<Context>(
 						None => continue,
 					};
 
-					match (per_relay_parent.prospective_parachains_mode, &next) {
-						(ProspectiveParachainsMode::Disabled, VersionedCollationRequest::V1(_)) => {
-							per_relay_parent.collations.values().next()
-						},
-						(ProspectiveParachainsMode::Enabled { .. }, VersionedCollationRequest::V2(req)) => {
-							per_relay_parent.collations.get(&req.payload.candidate_hash)
-						},
-						_ => {
-							// Request version is checked in `handle_incoming_request`.
-							continue
-						},
-					}
+					per_relay_parent.collations.get(&next.candidate_hash())
 				};
 
 				if let Some(collation_with_core) = next_collation_with_core {
@@ -1601,14 +1452,6 @@ async fn run_inner<Context>(
 					"Peer-set updated due to a timeout"
 				);
 			},
-			in_req = recv_req_v1 => {
-				let request = in_req.map(VersionedCollationRequest::from);
-
-				log_error(
-					handle_incoming_request(&mut ctx, &mut state, request).await,
-					"Handling incoming collation fetch request V1"
-				)?;
-			}
 			in_req = recv_req_v2 => {
 				let request = in_req.map(VersionedCollationRequest::from);
 
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
index 23954f8d781bdfa7c78a0ddd75898aff1d893bd3..5e7bc2d569d41e04a7dee77d9dbd989ec7d0af52 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs
@@ -33,33 +33,31 @@ use sp_keyring::Sr25519Keyring;
 use sp_runtime::traits::AppVerify;
 
 use polkadot_node_network_protocol::{
-	our_view,
 	peer_set::CollationVersion,
-	request_response::{IncomingRequest, ReqProtocolNames},
+	request_response::{
+		v2::{CollationFetchingRequest, CollationFetchingResponse},
+		IncomingRequest, ReqProtocolNames,
+	},
 	view,
 };
 use polkadot_node_primitives::BlockData;
-use polkadot_node_subsystem::{
-	errors::RuntimeApiError,
-	messages::{AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest},
-	ActiveLeavesUpdate,
+use polkadot_node_subsystem::messages::{
+	AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest,
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt};
 use polkadot_primitives::{
 	AuthorityDiscoveryId, Block, CollatorPair, ExecutorParams, GroupIndex, GroupRotationInfo,
-	IndexedVec, NodeFeatures, ScheduledCore, SessionIndex, SessionInfo, ValidatorId,
-	ValidatorIndex,
+	IndexedVec, NodeFeatures, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex,
 };
 use polkadot_primitives_test_helpers::TestCandidateBuilder;
-use test_helpers::mock::new_leaf;
 
 mod prospective_parachains;
+use prospective_parachains::{expect_declare_msg, update_view};
 
 const REPUTATION_CHANGE_TEST_INTERVAL: Duration = Duration::from_millis(10);
 
-const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError =
-	RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" };
+const SCHEDULING_LOOKAHEAD: usize = 4;
 
 #[derive(Clone)]
 struct TestState {
@@ -108,7 +106,8 @@ impl Default for TestState {
 			GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 };
 
 		let mut claim_queue = BTreeMap::new();
-		claim_queue.insert(CoreIndex(0), [para_id].into_iter().collect());
+		claim_queue
+			.insert(CoreIndex(0), std::iter::repeat(para_id).take(SCHEDULING_LOOKAHEAD).collect());
 		claim_queue.insert(CoreIndex(1), VecDeque::new());
 
 		let relay_parent = Hash::random();
@@ -179,47 +178,6 @@ impl TestState {
 			.map(|i| self.session_info.discovery_keys[i.0 as usize].clone())
 			.collect()
 	}
-
-	/// Generate a new relay parent and inform the subsystem about the new view.
-	///
-	/// If `merge_views == true` it means the subsystem will be informed that we are working on the
-	/// old `relay_parent` and the new one.
-	async fn advance_to_new_round(
-		&mut self,
-		virtual_overseer: &mut VirtualOverseer,
-		merge_views: bool,
-	) {
-		let old_relay_parent = self.relay_parent;
-
-		while self.relay_parent == old_relay_parent {
-			self.relay_parent.randomize();
-		}
-
-		let our_view = if merge_views {
-			our_view![old_relay_parent, self.relay_parent]
-		} else {
-			our_view![self.relay_parent]
-		};
-
-		overseer_send(
-			virtual_overseer,
-			CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange(
-				our_view,
-			)),
-		)
-		.await;
-
-		assert_matches!(
-			overseer_recv(virtual_overseer).await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-				relay_parent,
-				RuntimeApiRequest::AsyncBackingParams(tx)
-			)) => {
-				assert_eq!(relay_parent, self.relay_parent);
-				tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap();
-			}
-		);
-	}
 }
 
 type VirtualOverseer =
@@ -227,7 +185,6 @@ type VirtualOverseer =
 
 struct TestHarness {
 	virtual_overseer: VirtualOverseer,
-	req_v1_cfg: sc_network::config::RequestResponseConfig,
 	req_v2_cfg: sc_network::config::RequestResponseConfig,
 }
 
@@ -247,10 +204,6 @@ fn test_harness<T: Future<Output = TestHarness>>(
 	let genesis_hash = Hash::repeat_byte(0xff);
 	let req_protocol_names = ReqProtocolNames::new(&genesis_hash, None);
 
-	let (collation_req_receiver, req_v1_cfg) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
 	let (collation_req_v2_receiver, req_v2_cfg) = IncomingRequest::get_config_receiver::<
 		Block,
 		sc_network::NetworkWorker<Block, Hash>,
@@ -260,7 +213,6 @@ fn test_harness<T: Future<Output = TestHarness>>(
 			context,
 			local_peer_id,
 			collator_pair,
-			collation_req_receiver,
 			collation_req_v2_receiver,
 			Default::default(),
 			reputation,
@@ -270,7 +222,7 @@ fn test_harness<T: Future<Output = TestHarness>>(
 		.unwrap();
 	};
 
-	let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg });
+	let test_fut = test(TestHarness { virtual_overseer, req_v2_cfg });
 
 	futures::pin_mut!(test_fut);
 	futures::pin_mut!(subsystem);
@@ -334,39 +286,6 @@ async fn overseer_signal(overseer: &mut VirtualOverseer, signal: OverseerSignal)
 		.expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT));
 }
 
-// Setup the system by sending the `CollateOn`, `ActiveLeaves` and `OurViewChange` messages.
-async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestState) {
-	overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)).await;
-
-	overseer_signal(
-		virtual_overseer,
-		OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
-			test_state.relay_parent,
-			1,
-		))),
-	)
-	.await;
-
-	overseer_send(
-		virtual_overseer,
-		CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange(our_view![
-			test_state.relay_parent
-		])),
-	)
-	.await;
-
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-			relay_parent,
-			RuntimeApiRequest::AsyncBackingParams(tx)
-		)) => {
-			assert_eq!(relay_parent, test_state.relay_parent);
-			tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap();
-		}
-	);
-}
-
 /// Result of [`distribute_collation`]
 struct DistributeCollation {
 	candidate: CandidateReceipt,
@@ -390,52 +309,11 @@ async fn distribute_collation_with_receipt(
 			pov: pov.clone(),
 			parent_head_data: HeadData(vec![1, 2, 3]),
 			result_sender: None,
-			core_index: CoreIndex(0),
+			core_index: candidate.descriptor.core_index().unwrap(),
 		},
 	)
 	.await;
 
-	// obtain the availability cores.
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-			_relay_parent,
-			RuntimeApiRequest::AvailabilityCores(tx)
-		)) => {
-			assert_eq!(relay_parent, _relay_parent);
-			tx.send(Ok(test_state.claim_queue.values().map(|paras|
-				if let Some(para) = paras.front() {
-					CoreState::Scheduled(ScheduledCore { para_id: *para, collator: None })
-				} else {
-					CoreState::Free
-				}
-			).collect())).unwrap();
-		}
-	);
-
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-			_relay_parent,
-			RuntimeApiRequest::Version(tx)
-		)) => {
-			assert_eq!(relay_parent, _relay_parent);
-			tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap();
-		}
-	);
-
-	// obtain the claim queue schedule.
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-			_relay_parent,
-			RuntimeApiRequest::ClaimQueue(tx)
-		)) => {
-			assert_eq!(relay_parent, _relay_parent);
-			tx.send(Ok(test_state.claim_queue.clone())).unwrap();
-		}
-	);
-
 	// We don't know precisely what is going to come as session info might be cached:
 	loop {
 		match overseer_recv(virtual_overseer).await {
@@ -555,14 +433,16 @@ async fn connect_peer(
 	)
 	.await;
 
-	overseer_send(
-		virtual_overseer,
-		CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange(
-			peer,
-			view![],
-		)),
-	)
-	.await;
+	if version != CollationVersion::V1 {
+		overseer_send(
+			virtual_overseer,
+			CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerViewChange(
+				peer,
+				view![],
+			)),
+		)
+		.await;
+	}
 }
 
 /// Disconnect a peer
@@ -574,52 +454,15 @@ async fn disconnect_peer(virtual_overseer: &mut VirtualOverseer, peer: PeerId) {
 	.await;
 }
 
-/// Check that the next received message is a `Declare` message.
-async fn expect_declare_msg(
-	virtual_overseer: &mut VirtualOverseer,
-	test_state: &TestState,
-	peer: &PeerId,
-) {
-	assert_matches!(
-		overseer_recv(virtual_overseer).await,
-		AllMessages::NetworkBridgeTx(
-			NetworkBridgeTxMessage::SendCollationMessage(
-				to,
-				Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
-			)
-		) => {
-			assert_eq!(to[0], *peer);
-			assert_matches!(
-				wire_message,
-				protocol_v1::CollatorProtocolMessage::Declare(
-					collator_id,
-					para_id,
-					signature,
-				) => {
-					assert!(signature.verify(
-						&*protocol_v1::declare_signature_payload(&test_state.local_peer_id),
-						&collator_id),
-					);
-					assert_eq!(collator_id, test_state.collator_pair.public());
-					assert_eq!(para_id, test_state.para_id);
-				}
-			);
-		}
-	);
-}
-
 /// Check that the next received message is a collation advertisement message.
-///
-/// Expects v2 message if `expected_candidate_hashes` is `Some`, v1 otherwise.
 async fn expect_advertise_collation_msg(
 	virtual_overseer: &mut VirtualOverseer,
 	any_peers: &[PeerId],
 	expected_relay_parent: Hash,
-	expected_candidate_hashes: Option<Vec<CandidateHash>>,
+	expected_candidate_hashes: Vec<CandidateHash>,
 ) {
-	let mut candidate_hashes: Option<HashSet<_>> =
-		expected_candidate_hashes.map(|hashes| hashes.into_iter().collect());
-	let iter_num = candidate_hashes.as_ref().map(|hashes| hashes.len()).unwrap_or(1);
+	let mut candidate_hashes: HashSet<_> = expected_candidate_hashes.into_iter().collect();
+	let iter_num = candidate_hashes.len();
 
 	for _ in 0..iter_num {
 		assert_matches!(
@@ -631,23 +474,10 @@ async fn expect_advertise_collation_msg(
 				)
 			) => {
 				assert!(any_peers.iter().any(|p| to.contains(p)));
-				match (candidate_hashes.as_mut(), wire_message) {
-					(None, Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message))) => {
-						assert_matches!(
-							wire_message,
-							protocol_v1::CollatorProtocolMessage::AdvertiseCollation(
-								relay_parent,
-							) => {
-								assert_eq!(relay_parent, expected_relay_parent);
-							}
-						);
-					},
-					(
-						Some(candidate_hashes),
-						Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(
-							wire_message,
-						)),
-					) => {
+				match wire_message {
+					Versioned::V2(protocol_v2::CollationProtocol::CollatorProtocol(
+						wire_message,
+					)) => {
 						assert_matches!(
 							wire_message,
 							protocol_v2::CollatorProtocolMessage::AdvertiseCollation {
@@ -687,17 +517,59 @@ async fn send_peer_view_change(
 }
 
 fn decode_collation_response(bytes: &[u8]) -> (CandidateReceipt, PoV) {
-	let response: request_v1::CollationFetchingResponse =
-		request_v1::CollationFetchingResponse::decode(&mut &bytes[..])
-			.expect("Decoding should work");
+	let response: CollationFetchingResponse =
+		CollationFetchingResponse::decode(&mut &bytes[..]).expect("Decoding should work");
 	match response {
-		request_v1::CollationFetchingResponse::Collation(receipt, pov) => (receipt, pov),
-		request_v1::CollationFetchingResponse::CollationWithParentHeadData {
-			receipt, pov, ..
-		} => (receipt, pov),
+		CollationFetchingResponse::Collation(receipt, pov) => (receipt, pov),
+		CollationFetchingResponse::CollationWithParentHeadData { receipt, pov, .. } =>
+			(receipt, pov),
 	}
 }
 
+// Test that connecting with protocol v1 results in a disconnect.
+#[test]
+fn v1_protocol_rejected() {
+	let test_state = TestState::default();
+	let local_peer_id = test_state.local_peer_id;
+	let collator_pair = test_state.collator_pair.clone();
+
+	test_harness(
+		local_peer_id,
+		collator_pair,
+		ReputationAggregator::new(|_| true),
+		|mut test_harness| async move {
+			let virtual_overseer = &mut test_harness.virtual_overseer;
+
+			overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id))
+				.await;
+
+			update_view(&test_state, virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
+
+			distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
+				.await;
+
+			for (val, peer) in test_state
+				.current_group_validator_authority_ids()
+				.into_iter()
+				.zip(test_state.current_group_validator_peer_ids())
+			{
+				connect_peer(virtual_overseer, peer, CollationVersion::V1, Some(val.clone())).await;
+
+				assert_matches!(
+					overseer_recv(virtual_overseer).await,
+					AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::DisconnectPeer(bad_peer, peer_set)) => {
+						assert_eq!(peer_set, PeerSet::Collation);
+						assert_eq!(bad_peer, peer);
+					}
+				);
+			}
+
+			test_harness
+		},
+	);
+}
+
 #[test]
 fn advertise_and_send_collation() {
 	let mut test_state = TestState::default();
@@ -710,10 +582,16 @@ fn advertise_and_send_collation() {
 		ReputationAggregator::new(|_| true),
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
-			let mut req_v1_cfg = test_harness.req_v1_cfg;
-			let req_v2_cfg = test_harness.req_v2_cfg;
+			let mut req_v2_cfg = test_harness.req_v2_cfg;
+
+			overseer_send(
+				&mut virtual_overseer,
+				CollatorProtocolMessage::CollateOn(test_state.para_id),
+			)
+			.await;
 
-			setup_system(&mut virtual_overseer, &test_state).await;
+			update_view(&test_state, &mut virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
 
 			let DistributeCollation { candidate, pov_block } = distribute_collation(
 				&mut virtual_overseer,
@@ -728,7 +606,7 @@ fn advertise_and_send_collation() {
 				.into_iter()
 				.zip(test_state.current_group_validator_peer_ids())
 			{
-				connect_peer(&mut virtual_overseer, peer, CollationVersion::V1, Some(val.clone()))
+				connect_peer(&mut virtual_overseer, peer, CollationVersion::V2, Some(val.clone()))
 					.await;
 			}
 
@@ -751,20 +629,21 @@ fn advertise_and_send_collation() {
 				&mut virtual_overseer,
 				&[peer],
 				test_state.relay_parent,
-				None,
+				vec![candidate.hash()],
 			)
 			.await;
 
 			// Request a collation.
 			let (pending_response, rx) = oneshot::channel();
-			req_v1_cfg
+			req_v2_cfg
 				.inbound_queue
 				.as_mut()
 				.unwrap()
 				.send(RawIncomingRequest {
 					peer,
-					payload: request_v1::CollationFetchingRequest {
+					payload: CollationFetchingRequest {
 						relay_parent: test_state.relay_parent,
+						candidate_hash: candidate.hash(),
 						para_id: test_state.para_id,
 					}
 					.encode(),
@@ -776,14 +655,15 @@ fn advertise_and_send_collation() {
 			{
 				let (pending_response, rx) = oneshot::channel();
 
-				req_v1_cfg
+				req_v2_cfg
 					.inbound_queue
 					.as_mut()
 					.unwrap()
 					.send(RawIncomingRequest {
 						peer,
-						payload: request_v1::CollationFetchingRequest {
+						payload: CollationFetchingRequest {
 							relay_parent: test_state.relay_parent,
+							candidate_hash: candidate.hash(),
 							para_id: test_state.para_id,
 						}
 						.encode(),
@@ -817,21 +697,26 @@ fn advertise_and_send_collation() {
 			);
 
 			let old_relay_parent = test_state.relay_parent;
-			test_state.advance_to_new_round(&mut virtual_overseer, false).await;
+			test_state.relay_parent.randomize();
+
+			// Update our view, making the old relay parent go out of the implicit view.
+			update_view(&test_state, &mut virtual_overseer, vec![(test_state.relay_parent, 20)], 1)
+				.await;
 
 			let peer = test_state.validator_peer_id[2];
 
-			// Re-request a collation.
+			// Re-request the collation.
 			let (pending_response, rx) = oneshot::channel();
 
-			req_v1_cfg
+			req_v2_cfg
 				.inbound_queue
 				.as_mut()
 				.unwrap()
 				.send(RawIncomingRequest {
 					peer,
-					payload: request_v1::CollationFetchingRequest {
+					payload: CollationFetchingRequest {
 						relay_parent: old_relay_parent,
+						candidate_hash: candidate.hash(),
 						para_id: test_state.para_id,
 					}
 					.encode(),
@@ -839,13 +724,18 @@ fn advertise_and_send_collation() {
 				})
 				.await
 				.unwrap();
-			// Re-requesting collation should fail:
+			// Re-requesting the collation should fail, because the relay parent is out of the implicit view.
 			rx.await.unwrap_err();
 
 			assert!(overseer_recv_with_timeout(&mut virtual_overseer, TIMEOUT).await.is_none());
 
-			distribute_collation(&mut virtual_overseer, &test_state, test_state.relay_parent, true)
-				.await;
+			let DistributeCollation { candidate, .. } = distribute_collation(
+				&mut virtual_overseer,
+				&test_state,
+				test_state.relay_parent,
+				true,
+			)
+			.await;
 
 			// Send info about peer's view.
 			overseer_send(
@@ -861,10 +751,10 @@ fn advertise_and_send_collation() {
 				&mut virtual_overseer,
 				&[peer],
 				test_state.relay_parent,
-				None,
+				vec![candidate.hash()],
 			)
 			.await;
-			TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }
+			TestHarness { virtual_overseer, req_v2_cfg }
 		},
 	);
 }
@@ -881,12 +771,18 @@ fn delay_reputation_change() {
 		ReputationAggregator::new(|_| false),
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
-			let mut req_v1_cfg = test_harness.req_v1_cfg;
-			let req_v2_cfg = test_harness.req_v2_cfg;
+			let mut req_v2_cfg = test_harness.req_v2_cfg;
 
-			setup_system(&mut virtual_overseer, &test_state).await;
+			overseer_send(
+				&mut virtual_overseer,
+				CollatorProtocolMessage::CollateOn(test_state.para_id),
+			)
+			.await;
+
+			update_view(&test_state, &mut virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
 
-			let _ = distribute_collation(
+			let DistributeCollation { candidate, .. } = distribute_collation(
 				&mut virtual_overseer,
 				&test_state,
 				test_state.relay_parent,
@@ -899,7 +795,7 @@ fn delay_reputation_change() {
 				.into_iter()
 				.zip(test_state.current_group_validator_peer_ids())
 			{
-				connect_peer(&mut virtual_overseer, peer, CollationVersion::V1, Some(val.clone()))
+				connect_peer(&mut virtual_overseer, peer, CollationVersion::V2, Some(val.clone()))
 					.await;
 			}
 
@@ -922,21 +818,22 @@ fn delay_reputation_change() {
 				&mut virtual_overseer,
 				&[peer],
 				test_state.relay_parent,
-				None,
+				vec![candidate.hash()],
 			)
 			.await;
 
 			// Request a collation.
 			let (pending_response, _rx) = oneshot::channel();
-			req_v1_cfg
+			req_v2_cfg
 				.inbound_queue
 				.as_mut()
 				.unwrap()
 				.send(RawIncomingRequest {
 					peer,
-					payload: request_v1::CollationFetchingRequest {
+					payload: CollationFetchingRequest {
 						relay_parent: test_state.relay_parent,
 						para_id: test_state.para_id,
+						candidate_hash: candidate.hash(),
 					}
 					.encode(),
 					pending_response,
@@ -947,15 +844,16 @@ fn delay_reputation_change() {
 			{
 				let (pending_response, _rx) = oneshot::channel();
 
-				req_v1_cfg
+				req_v2_cfg
 					.inbound_queue
 					.as_mut()
 					.unwrap()
 					.send(RawIncomingRequest {
 						peer,
-						payload: request_v1::CollationFetchingRequest {
+						payload: CollationFetchingRequest {
 							relay_parent: test_state.relay_parent,
 							para_id: test_state.para_id,
+							candidate_hash: candidate.hash(),
 						}
 						.encode(),
 						pending_response,
@@ -978,85 +876,7 @@ fn delay_reputation_change() {
 				);
 			}
 
-			TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }
-		},
-	);
-}
-
-/// Tests that collator side works with v2 network protocol
-/// before async backing is enabled.
-#[test]
-fn advertise_collation_v2_protocol() {
-	let test_state = TestState::default();
-	let local_peer_id = test_state.local_peer_id;
-	let collator_pair = test_state.collator_pair.clone();
-
-	test_harness(
-		local_peer_id,
-		collator_pair,
-		ReputationAggregator::new(|_| true),
-		|mut test_harness| async move {
-			let virtual_overseer = &mut test_harness.virtual_overseer;
-
-			setup_system(virtual_overseer, &test_state).await;
-
-			let DistributeCollation { candidate, .. } =
-				distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
-					.await;
-
-			let validators = test_state.current_group_validator_authority_ids();
-			assert!(validators.len() >= 2);
-			let peer_ids = test_state.current_group_validator_peer_ids();
-
-			// Connect first peer with v1.
-			connect_peer(
-				virtual_overseer,
-				peer_ids[0],
-				CollationVersion::V1,
-				Some(validators[0].clone()),
-			)
-			.await;
-			// The rest with v2.
-			for (val, peer) in validators.iter().zip(peer_ids.iter()).skip(1) {
-				connect_peer(virtual_overseer, *peer, CollationVersion::V2, Some(val.clone()))
-					.await;
-			}
-
-			// Declare messages.
-			expect_declare_msg(virtual_overseer, &test_state, &peer_ids[0]).await;
-			for peer_id in peer_ids.iter().skip(1) {
-				prospective_parachains::expect_declare_msg_v2(
-					virtual_overseer,
-					&test_state,
-					&peer_id,
-				)
-				.await;
-			}
-
-			// Send info about peers view.
-			for peer in peer_ids.iter() {
-				send_peer_view_change(virtual_overseer, peer, vec![test_state.relay_parent]).await;
-			}
-
-			// Versioned advertisements work.
-			expect_advertise_collation_msg(
-				virtual_overseer,
-				&[peer_ids[0]],
-				test_state.relay_parent,
-				None,
-			)
-			.await;
-			for peer_id in peer_ids.iter().skip(1) {
-				expect_advertise_collation_msg(
-					virtual_overseer,
-					&[*peer_id],
-					test_state.relay_parent,
-					Some(vec![candidate.hash()]), // This is `Some`, advertisement is v2.
-				)
-				.await;
-			}
-
-			test_harness
+			TestHarness { virtual_overseer, req_v2_cfg }
 		},
 	);
 }
@@ -1100,13 +920,25 @@ fn collators_declare_to_connected_peers() {
 			let peer = test_state.validator_peer_id[0];
 			let validator_id = test_state.current_group_validator_authority_ids()[0].clone();
 
-			setup_system(&mut test_harness.virtual_overseer, &test_state).await;
+			overseer_send(
+				&mut test_harness.virtual_overseer,
+				CollatorProtocolMessage::CollateOn(test_state.para_id),
+			)
+			.await;
+
+			update_view(
+				&test_state,
+				&mut test_harness.virtual_overseer,
+				vec![(test_state.relay_parent, 10)],
+				1,
+			)
+			.await;
 
 			// A validator connected to us
 			connect_peer(
 				&mut test_harness.virtual_overseer,
 				peer,
-				CollationVersion::V1,
+				CollationVersion::V2,
 				Some(validator_id),
 			)
 			.await;
@@ -1135,13 +967,17 @@ fn collations_are_only_advertised_to_validators_with_correct_view() {
 			let peer2 = test_state.current_group_validator_peer_ids()[1];
 			let validator_id2 = test_state.current_group_validator_authority_ids()[1].clone();
 
-			setup_system(virtual_overseer, &test_state).await;
+			overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id))
+				.await;
+
+			update_view(&test_state, virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
 
 			// A validator connected to us
-			connect_peer(virtual_overseer, peer, CollationVersion::V1, Some(validator_id)).await;
+			connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(validator_id)).await;
 
 			// Connect the second validator
-			connect_peer(virtual_overseer, peer2, CollationVersion::V1, Some(validator_id2)).await;
+			connect_peer(virtual_overseer, peer2, CollationVersion::V2, Some(validator_id2)).await;
 
 			expect_declare_msg(virtual_overseer, &test_state, &peer).await;
 			expect_declare_msg(virtual_overseer, &test_state, &peer2).await;
@@ -1149,14 +985,15 @@ fn collations_are_only_advertised_to_validators_with_correct_view() {
 			// And let it tell us that it is has the same view.
 			send_peer_view_change(virtual_overseer, &peer2, vec![test_state.relay_parent]).await;
 
-			distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
-				.await;
+			let DistributeCollation { candidate, .. } =
+				distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
+					.await;
 
 			expect_advertise_collation_msg(
 				virtual_overseer,
 				&[peer2],
 				test_state.relay_parent,
-				None,
+				vec![candidate.hash()],
 			)
 			.await;
 
@@ -1168,7 +1005,7 @@ fn collations_are_only_advertised_to_validators_with_correct_view() {
 				virtual_overseer,
 				&[peer],
 				test_state.relay_parent,
-				None,
+				vec![candidate.hash()],
 			)
 			.await;
 			test_harness
@@ -1195,31 +1032,50 @@ fn collate_on_two_different_relay_chain_blocks() {
 			let peer2 = test_state.current_group_validator_peer_ids()[1];
 			let validator_id2 = test_state.current_group_validator_authority_ids()[1].clone();
 
-			setup_system(virtual_overseer, &test_state).await;
+			overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id))
+				.await;
+
+			update_view(&test_state, virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
 
 			// A validator connected to us
-			connect_peer(virtual_overseer, peer, CollationVersion::V1, Some(validator_id)).await;
+			connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(validator_id)).await;
 
 			// Connect the second validator
-			connect_peer(virtual_overseer, peer2, CollationVersion::V1, Some(validator_id2)).await;
+			connect_peer(virtual_overseer, peer2, CollationVersion::V2, Some(validator_id2)).await;
 
 			expect_declare_msg(virtual_overseer, &test_state, &peer).await;
 			expect_declare_msg(virtual_overseer, &test_state, &peer2).await;
 
-			distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
-				.await;
+			let DistributeCollation { candidate: old_candidate, .. } =
+				distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
+					.await;
 
 			let old_relay_parent = test_state.relay_parent;
 
-			// Advance to a new round, while informing the subsystem that the old and the new relay
+			// Update our view, informing the subsystem that the old and the new relay
 			// parents are active.
-			test_state.advance_to_new_round(virtual_overseer, true).await;
+			test_state.relay_parent.randomize();
+			update_view(
+				&test_state,
+				virtual_overseer,
+				vec![(old_relay_parent, 10), (test_state.relay_parent, 10)],
+				1,
+			)
+			.await;
 
-			distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
-				.await;
+			let DistributeCollation { candidate: new_candidate, .. } =
+				distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
+					.await;
 
 			send_peer_view_change(virtual_overseer, &peer, vec![old_relay_parent]).await;
-			expect_advertise_collation_msg(virtual_overseer, &[peer], old_relay_parent, None).await;
+			expect_advertise_collation_msg(
+				virtual_overseer,
+				&[peer],
+				old_relay_parent,
+				vec![old_candidate.hash()],
+			)
+			.await;
 
 			send_peer_view_change(virtual_overseer, &peer2, vec![test_state.relay_parent]).await;
 
@@ -1227,7 +1083,7 @@ fn collate_on_two_different_relay_chain_blocks() {
 				virtual_overseer,
 				&[peer2],
 				test_state.relay_parent,
-				None,
+				vec![new_candidate.hash()],
 			)
 			.await;
 			test_harness
@@ -1251,28 +1107,33 @@ fn validator_reconnect_does_not_advertise_a_second_time() {
 			let peer = test_state.current_group_validator_peer_ids()[0];
 			let validator_id = test_state.current_group_validator_authority_ids()[0].clone();
 
-			setup_system(virtual_overseer, &test_state).await;
+			overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id))
+				.await;
+
+			update_view(&test_state, virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
 
 			// A validator connected to us
-			connect_peer(virtual_overseer, peer, CollationVersion::V1, Some(validator_id.clone()))
+			connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(validator_id.clone()))
 				.await;
 			expect_declare_msg(virtual_overseer, &test_state, &peer).await;
 
-			distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
-				.await;
+			let DistributeCollation { candidate, .. } =
+				distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
+					.await;
 
 			send_peer_view_change(virtual_overseer, &peer, vec![test_state.relay_parent]).await;
 			expect_advertise_collation_msg(
 				virtual_overseer,
 				&[peer],
 				test_state.relay_parent,
-				None,
+				vec![candidate.hash()],
 			)
 			.await;
 
 			// Disconnect and reconnect directly
 			disconnect_peer(virtual_overseer, peer).await;
-			connect_peer(virtual_overseer, peer, CollationVersion::V1, Some(validator_id)).await;
+			connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(validator_id)).await;
 			expect_declare_msg(virtual_overseer, &test_state, &peer).await;
 
 			send_peer_view_change(virtual_overseer, &peer, vec![test_state.relay_parent]).await;
@@ -1300,10 +1161,14 @@ fn collators_reject_declare_messages() {
 			let peer = test_state.current_group_validator_peer_ids()[0];
 			let validator_id = test_state.current_group_validator_authority_ids()[0].clone();
 
-			setup_system(virtual_overseer, &test_state).await;
+			overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id))
+				.await;
+
+			update_view(&test_state, virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
 
 			// A validator connected to us
-			connect_peer(virtual_overseer, peer, CollationVersion::V1, Some(validator_id)).await;
+			connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(validator_id)).await;
 			expect_declare_msg(virtual_overseer, &test_state, &peer).await;
 
 			overseer_send(
@@ -1355,9 +1220,13 @@ where
 		ReputationAggregator::new(|_| true),
 		|mut test_harness| async move {
 			let virtual_overseer = &mut test_harness.virtual_overseer;
-			let req_cfg = &mut test_harness.req_v1_cfg;
+			let req_cfg = &mut test_harness.req_v2_cfg;
+
+			overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id))
+				.await;
 
-			setup_system(virtual_overseer, &test_state).await;
+			update_view(&test_state, virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
 
 			let DistributeCollation { candidate, pov_block } =
 				distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true)
@@ -1368,7 +1237,7 @@ where
 				.into_iter()
 				.zip(test_state.current_group_validator_peer_ids())
 			{
-				connect_peer(virtual_overseer, peer, CollationVersion::V1, Some(val.clone())).await;
+				connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(val.clone())).await;
 			}
 
 			// We declare to the connected validators that we are a collator.
@@ -1393,14 +1262,14 @@ where
 				virtual_overseer,
 				&[validator_0],
 				test_state.relay_parent,
-				None,
+				vec![candidate.hash()],
 			)
 			.await;
 			expect_advertise_collation_msg(
 				virtual_overseer,
 				&[validator_1],
 				test_state.relay_parent,
-				None,
+				vec![candidate.hash()],
 			)
 			.await;
 
@@ -1412,9 +1281,10 @@ where
 				.unwrap()
 				.send(RawIncomingRequest {
 					peer: validator_0,
-					payload: request_v1::CollationFetchingRequest {
+					payload: CollationFetchingRequest {
 						relay_parent: test_state.relay_parent,
 						para_id: test_state.para_id,
+						candidate_hash: candidate.hash(),
 					}
 					.encode(),
 					pending_response,
@@ -1446,9 +1316,10 @@ where
 				.unwrap()
 				.send(RawIncomingRequest {
 					peer: validator_1,
-					payload: request_v1::CollationFetchingRequest {
+					payload: CollationFetchingRequest {
 						relay_parent: test_state.relay_parent,
 						para_id: test_state.para_id,
+						candidate_hash: candidate.hash(),
 					}
 					.encode(),
 					pending_response,
@@ -1490,16 +1361,22 @@ fn connect_to_buffered_groups() {
 		ReputationAggregator::new(|_| true),
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
-			let mut req_cfg = test_harness.req_v1_cfg;
-			let req_v2_cfg = test_harness.req_v2_cfg;
+			let mut req_cfg = test_harness.req_v2_cfg;
+
+			overseer_send(
+				&mut virtual_overseer,
+				CollatorProtocolMessage::CollateOn(test_state.para_id),
+			)
+			.await;
 
-			setup_system(&mut virtual_overseer, &test_state).await;
+			update_view(&test_state, &mut virtual_overseer, vec![(test_state.relay_parent, 10)], 1)
+				.await;
 
 			let group_a = test_state.current_group_validator_authority_ids();
 			let peers_a = test_state.current_group_validator_peer_ids();
 			assert!(group_a.len() > 1);
 
-			distribute_collation(
+			let DistributeCollation { candidate, .. } = distribute_collation(
 				&mut virtual_overseer,
 				&test_state,
 				test_state.relay_parent,
@@ -1519,7 +1396,7 @@ fn connect_to_buffered_groups() {
 			let head_a = test_state.relay_parent;
 
 			for (val, peer) in group_a.iter().zip(&peers_a) {
-				connect_peer(&mut virtual_overseer, *peer, CollationVersion::V1, Some(val.clone()))
+				connect_peer(&mut virtual_overseer, *peer, CollationVersion::V2, Some(val.clone()))
 					.await;
 			}
 
@@ -1530,8 +1407,13 @@ fn connect_to_buffered_groups() {
 			// Update views.
 			for peer_id in &peers_a {
 				send_peer_view_change(&mut virtual_overseer, peer_id, vec![head_a]).await;
-				expect_advertise_collation_msg(&mut virtual_overseer, &[*peer_id], head_a, None)
-					.await;
+				expect_advertise_collation_msg(
+					&mut virtual_overseer,
+					&[*peer_id],
+					head_a,
+					vec![candidate.hash()],
+				)
+				.await;
 			}
 
 			let peer = peers_a[0];
@@ -1543,9 +1425,10 @@ fn connect_to_buffered_groups() {
 				.unwrap()
 				.send(RawIncomingRequest {
 					peer,
-					payload: request_v1::CollationFetchingRequest {
+					payload: CollationFetchingRequest {
 						relay_parent: head_a,
 						para_id: test_state.para_id,
+						candidate_hash: candidate.hash(),
 					}
 					.encode(),
 					pending_response,
@@ -1565,7 +1448,17 @@ fn connect_to_buffered_groups() {
 			// Let the subsystem process the collation event.
 			test_helpers::Yield::new().await;
 
-			test_state.advance_to_new_round(&mut virtual_overseer, true).await;
+			let old_relay_parent = test_state.relay_parent;
+			test_state.relay_parent.randomize();
+
+			// Update our view.
+			update_view(
+				&test_state,
+				&mut virtual_overseer,
+				vec![(old_relay_parent, 10), (test_state.relay_parent, 20)],
+				1,
+			)
+			.await;
 			test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation();
 
 			let head_b = test_state.relay_parent;
@@ -1596,7 +1489,7 @@ fn connect_to_buffered_groups() {
 				}
 			);
 
-			TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_v2_cfg }
+			TestHarness { virtual_overseer, req_v2_cfg: req_cfg }
 		},
 	);
 }
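// A minimal standalone sketch (not part of the patch) of the request shape the
// tests above now exercise: the v2 `CollationFetchingRequest` names a specific
// candidate via `candidate_hash`, instead of just `(relay_parent, para_id)` as
// in the removed v1 request. Types and paths are the ones shown in the imports
// of this diff.
use codec::Encode;
use polkadot_node_network_protocol::request_response::v2::CollationFetchingRequest;
use polkadot_primitives::{CandidateHash, Hash, Id as ParaId};

fn encode_fetch_request(
	relay_parent: Hash,
	para_id: ParaId,
	candidate_hash: CandidateHash,
) -> Vec<u8> {
	// SCALE-encode the request exactly as the test harness does before
	// sending it over the raw incoming-request channel.
	CollationFetchingRequest { relay_parent, para_id, candidate_hash }.encode()
}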
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs
index 348feb9dd1dbc01641556b25c813e4f4c205c870..a5c74a1205c9ef5ef3d2e6d3d55a7e54c912c9f6 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs
@@ -19,17 +19,16 @@
 use super::*;
 
 use polkadot_node_subsystem::messages::ChainApiMessage;
-use polkadot_primitives::{AsyncBackingParams, Header};
-
-const ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
-	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
+use polkadot_primitives::Header;
+use rstest::rstest;
 
 fn get_parent_hash(hash: Hash) -> Hash {
 	Hash::from_low_u64_be(hash.to_low_u64_be() + 1)
 }
 
 /// Handle a view update.
-async fn update_view(
+pub(super) async fn update_view(
+	test_state: &TestState,
 	virtual_overseer: &mut VirtualOverseer,
 	new_view: Vec<(Hash, u32)>, // Hash and block number.
 	activated: u8,              // How many new heads does this update contain?
@@ -45,18 +44,19 @@ async fn update_view(
 	.await;
 
 	for _ in 0..activated {
+		// Obtain the claim queue schedule.
 		let (leaf_hash, leaf_number) = assert_matches!(
 			overseer_recv(virtual_overseer).await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 				parent,
-				RuntimeApiRequest::AsyncBackingParams(tx),
+				RuntimeApiRequest::ClaimQueue(tx),
 			)) => {
-				tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap();
+				tx.send(Ok(test_state.claim_queue.clone())).unwrap();
 				(parent, new_view.get(&parent).copied().expect("Unknown parent requested"))
 			}
 		);
 
-		let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len);
+		let min_number = leaf_number.saturating_sub(SCHEDULING_LOOKAHEAD as u32 - 1);
 
 		let ancestry_len = leaf_number + 1 - min_number;
 		let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h)))
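// Sketch (not part of the patch) of the window arithmetic used above: a
// scheduling lookahead of N covers the leaf itself plus N - 1 ancestors.
fn ancestry_window(leaf_number: u32, scheduling_lookahead: u32) -> (u32, u32) {
	// Oldest block number still allowed as a relay parent under this leaf.
	let min_number = leaf_number.saturating_sub(scheduling_lookahead.saturating_sub(1));
	// Total number of blocks in the window, leaf included.
	let ancestry_len = leaf_number + 1 - min_number;
	(min_number, ancestry_len)
}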
@@ -85,12 +85,12 @@ async fn update_view(
 				AllMessages::RuntimeApi(
 					RuntimeApiMessage::Request(
 						..,
-						RuntimeApiRequest::AsyncBackingParams(
+						RuntimeApiRequest::SessionIndexForChild(
 							tx
 						)
 					)
 				) => {
-					tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap();
+					tx.send(Ok(1)).unwrap();
 				}
 			);
 
@@ -99,12 +99,14 @@ async fn update_view(
 				AllMessages::RuntimeApi(
 					RuntimeApiMessage::Request(
 						..,
-						RuntimeApiRequest::SessionIndexForChild(
+						RuntimeApiRequest::SchedulingLookahead(
+							session_index,
 							tx
 						)
 					)
 				) => {
-					tx.send(Ok(1)).unwrap();
+					assert_eq!(session_index, 1);
+					tx.send(Ok(SCHEDULING_LOOKAHEAD as u32)).unwrap();
 				}
 			);
 
@@ -117,9 +119,10 @@ async fn update_view(
 						..
 					}
 				) => {
-					assert_eq!(k, ASYNC_BACKING_PARAMETERS.allowed_ancestry_len as usize);
-
-					tx.send(Ok(ancestry_hashes.clone().skip(1).into_iter().collect())).unwrap();
+					assert_eq!(k, SCHEDULING_LOOKAHEAD - 1);
+					let hashes: Vec<_> = ancestry_hashes.clone().skip(1).collect();
+					assert_eq!(k, hashes.len());
+					tx.send(Ok(hashes)).unwrap();
 				}
 			);
 		}
@@ -140,10 +143,11 @@ async fn update_view(
 			);
 		}
 
-		while let Some((hash, number)) = ancestry_iter.next() {
+		let mut iter_clone = ancestry_iter.clone();
+		while let Some((hash, number)) = iter_clone.next() {
 			// May be `None` for the last element.
 			let parent_hash =
-				ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash));
+				iter_clone.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash));
 
 			let Some(msg) =
 				overseer_peek_with_timeout(virtual_overseer, Duration::from_millis(50)).await
@@ -175,11 +179,40 @@ async fn update_view(
 				}
 			);
 		}
+
+		for _ in ancestry_iter {
+			let Some(msg) =
+				overseer_peek_with_timeout(virtual_overseer, Duration::from_millis(50)).await
+			else {
+				return
+			};
+
+			if !matches!(
+				&msg,
+				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+					_,
+					RuntimeApiRequest::ClaimQueue(_)
+				))
+			) {
+				// Claim queue has already been fetched for this leaf.
+				break
+			}
+
+			assert_matches!(
+				overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(),
+				AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+					_,
+					RuntimeApiRequest::ClaimQueue(tx),
+				)) => {
+					tx.send(Ok(test_state.claim_queue.clone())).unwrap();
+				}
+			);
+		}
 	}
 }
 
 /// Check that the next received message is a `Declare` message.
-pub(super) async fn expect_declare_msg_v2(
+pub(super) async fn expect_declare_msg(
 	virtual_overseer: &mut VirtualOverseer,
 	test_state: &TestState,
 	peer: &PeerId,
@@ -214,8 +247,12 @@ pub(super) async fn expect_declare_msg_v2(
 
 /// Test that a collator distributes a collation from the allowed ancestry
 /// to the correct validator group.
-#[test]
-fn distribute_collation_from_implicit_view() {
+/// Run once with the validators sending their view first, and once with the collator
+/// setting its own view first.
+#[rstest]
+#[case(true)]
+#[case(false)]
+fn distribute_collation_from_implicit_view(#[case] validator_sends_view_first: bool) {
 	let head_a = Hash::from_low_u64_be(126);
 	let head_a_num: u32 = 66;
 
@@ -227,163 +264,160 @@ fn distribute_collation_from_implicit_view() {
 	let head_c = Hash::from_low_u64_be(130);
 	let head_c_num = 62;
 
-	// Run once with validators sending their view first and then the collator setting their own
-	// view first.
-	for validator_sends_view_first in [true, false] {
-		let group_rotation_info = GroupRotationInfo {
-			session_start_block: head_c_num - 2,
-			group_rotation_frequency: 3,
-			now: head_c_num,
-		};
-
-		let mut test_state = TestState::default();
-		test_state.group_rotation_info = group_rotation_info;
-
-		let local_peer_id = test_state.local_peer_id;
-		let collator_pair = test_state.collator_pair.clone();
-
-		test_harness(
-			local_peer_id,
-			collator_pair,
-			ReputationAggregator::new(|_| true),
-			|mut test_harness| async move {
-				let virtual_overseer = &mut test_harness.virtual_overseer;
-
-				// Set collating para id.
-				overseer_send(
+	let group_rotation_info = GroupRotationInfo {
+		session_start_block: head_c_num - 2,
+		group_rotation_frequency: 3,
+		now: head_c_num,
+	};
+
+	let mut test_state = TestState::default();
+	test_state.group_rotation_info = group_rotation_info;
+
+	let local_peer_id = test_state.local_peer_id;
+	let collator_pair = test_state.collator_pair.clone();
+
+	test_harness(
+		local_peer_id,
+		collator_pair,
+		ReputationAggregator::new(|_| true),
+		|mut test_harness| async move {
+			let virtual_overseer = &mut test_harness.virtual_overseer;
+
+			// Set collating para id.
+			overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id))
+				.await;
+
+			if validator_sends_view_first {
+				// Activate leaf `c` to accept at least the collation.
+				update_view(&test_state, virtual_overseer, vec![(head_c, head_c_num)], 1).await;
+			} else {
+				// Activated leaf is `b`, but the collation will be based on `c`.
+				update_view(&test_state, virtual_overseer, vec![(head_b, head_b_num)], 1).await;
+			}
+
+			let validator_peer_ids = test_state.current_group_validator_peer_ids();
+			for (val, peer) in test_state
+				.current_group_validator_authority_ids()
+				.into_iter()
+				.zip(validator_peer_ids.clone())
+			{
+				connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(val.clone())).await;
+			}
+
+			// Collator declared itself to each peer.
+			for peer_id in &validator_peer_ids {
+				expect_declare_msg(virtual_overseer, &test_state, peer_id).await;
+			}
+
+			let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
+			let parent_head_data_hash = Hash::repeat_byte(0xAA);
+			let candidate = TestCandidateBuilder {
+				para_id: test_state.para_id,
+				relay_parent: head_c,
+				pov_hash: pov.hash(),
+				..Default::default()
+			}
+			.build();
+			let DistributeCollation { candidate, pov_block: _ } =
+				distribute_collation_with_receipt(
 					virtual_overseer,
-					CollatorProtocolMessage::CollateOn(test_state.para_id),
+					&test_state,
+					head_c,
+					false, // Check the group manually.
+					candidate,
+					pov,
+					parent_head_data_hash,
 				)
 				.await;
+			assert_matches!(
+				overseer_recv(virtual_overseer).await,
+				AllMessages::NetworkBridgeTx(
+					NetworkBridgeTxMessage::ConnectToValidators { validator_ids, .. }
+				) => {
+					let expected_validators = test_state.current_group_validator_authority_ids();
 
-				if validator_sends_view_first {
-					// Activate leaf `c` to accept at least the collation.
-					update_view(virtual_overseer, vec![(head_c, head_c_num)], 1).await;
-				} else {
-					// Activated leaf is `b`, but the collation will be based on `c`.
-					update_view(virtual_overseer, vec![(head_b, head_b_num)], 1).await;
+					assert_eq!(expected_validators, validator_ids);
 				}
+			);
 
-				let validator_peer_ids = test_state.current_group_validator_peer_ids();
-				for (val, peer) in test_state
-					.current_group_validator_authority_ids()
-					.into_iter()
-					.zip(validator_peer_ids.clone())
-				{
-					connect_peer(virtual_overseer, peer, CollationVersion::V2, Some(val.clone()))
-						.await;
-				}
+			let candidate_hash = candidate.hash();
 
-				// Collator declared itself to each peer.
-				for peer_id in &validator_peer_ids {
-					expect_declare_msg_v2(virtual_overseer, &test_state, peer_id).await;
-				}
+			// Update peer views.
+			for peer_id in &validator_peer_ids {
+				send_peer_view_change(virtual_overseer, peer_id, vec![head_b]).await;
 
-				let pov = PoV { block_data: BlockData(vec![1, 2, 3]) };
-				let parent_head_data_hash = Hash::repeat_byte(0xAA);
-				let candidate = TestCandidateBuilder {
-					para_id: test_state.para_id,
-					relay_parent: head_c,
-					pov_hash: pov.hash(),
-					..Default::default()
-				}
-				.build();
-				let DistributeCollation { candidate, pov_block: _ } =
-					distribute_collation_with_receipt(
+				if !validator_sends_view_first {
+					expect_advertise_collation_msg(
 						virtual_overseer,
-						&test_state,
+						&[*peer_id],
 						head_c,
-						false, // Check the group manually.
-						candidate,
-						pov,
-						parent_head_data_hash,
+						vec![candidate_hash],
 					)
 					.await;
-				assert_matches!(
-					overseer_recv(virtual_overseer).await,
-					AllMessages::NetworkBridgeTx(
-						NetworkBridgeTxMessage::ConnectToValidators { validator_ids, .. }
-					) => {
-						let expected_validators = test_state.current_group_validator_authority_ids();
-
-						assert_eq!(expected_validators, validator_ids);
-					}
-				);
-
-				let candidate_hash = candidate.hash();
-
-				// Update peer views.
-				for peer_id in &validator_peer_ids {
-					send_peer_view_change(virtual_overseer, peer_id, vec![head_b]).await;
-
-					if !validator_sends_view_first {
-						expect_advertise_collation_msg(
-							virtual_overseer,
-							&[*peer_id],
-							head_c,
-							Some(vec![candidate_hash]),
-						)
-						.await;
-					}
 				}
+			}
 
-				if validator_sends_view_first {
-					// Activated leaf is `b`, but the collation will be based on `c`.
-					update_view(virtual_overseer, vec![(head_b, head_b_num)], 1).await;
+			if validator_sends_view_first {
+				// Activated leaf is `b`, but the collation will be based on `c`.
+				update_view(&test_state, virtual_overseer, vec![(head_b, head_b_num)], 1).await;
 
-					for _ in &validator_peer_ids {
-						expect_advertise_collation_msg(
-							virtual_overseer,
-							&validator_peer_ids,
-							head_c,
-							Some(vec![candidate_hash]),
-						)
-						.await;
-					}
+				for _ in &validator_peer_ids {
+					expect_advertise_collation_msg(
+						virtual_overseer,
+						&validator_peer_ids,
+						head_c,
+						vec![candidate_hash],
+					)
+					.await;
 				}
+			}
 
-				// Head `c` goes out of view.
-				// Build a different candidate for this relay parent and attempt to distribute it.
-				update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await;
+			// Head `c` goes out of view.
+			// Build a different candidate for this relay parent and attempt to distribute it.
+			update_view(&test_state, virtual_overseer, vec![(head_a, head_a_num)], 1).await;
 
-				let pov = PoV { block_data: BlockData(vec![4, 5, 6]) };
-				let parent_head_data_hash = Hash::repeat_byte(0xBB);
-				let candidate = TestCandidateBuilder {
-					para_id: test_state.para_id,
-					relay_parent: head_c,
-					pov_hash: pov.hash(),
-					..Default::default()
-				}
-				.build();
-				overseer_send(
-					virtual_overseer,
-					CollatorProtocolMessage::DistributeCollation {
-						candidate_receipt: candidate.clone(),
-						parent_head_data_hash,
-						pov: pov.clone(),
-						parent_head_data: HeadData(vec![1, 2, 3]),
-						result_sender: None,
-						core_index: CoreIndex(0),
-					},
-				)
-				.await;
+			let pov = PoV { block_data: BlockData(vec![4, 5, 6]) };
+			let parent_head_data_hash = Hash::repeat_byte(0xBB);
+			let candidate = TestCandidateBuilder {
+				para_id: test_state.para_id,
+				relay_parent: head_c,
+				pov_hash: pov.hash(),
+				..Default::default()
+			}
+			.build();
+			overseer_send(
+				virtual_overseer,
+				CollatorProtocolMessage::DistributeCollation {
+					candidate_receipt: candidate.clone(),
+					parent_head_data_hash,
+					pov: pov.clone(),
+					parent_head_data: HeadData(vec![1, 2, 3]),
+					result_sender: None,
+					core_index: CoreIndex(0),
+				},
+			)
+			.await;
 
-				// Parent out of view, nothing happens.
-				assert!(overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(100))
-					.await
-					.is_none());
+			// Parent out of view, nothing happens.
+			assert!(overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(100))
+				.await
+				.is_none());
 
-				test_harness
-			},
-		);
-	}
+			test_harness
+		},
+	);
 }
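// The conversion above replaces the manual `for validator_sends_view_first in
// [true, false]` loop with `rstest` parametrisation; a minimal sketch of the
// pattern (hypothetical test body), where each `#[case]` expands into its own
// test run of the same function:
use rstest::rstest;

#[rstest]
#[case(true)]
#[case(false)]
fn both_orderings_are_exercised(#[case] validator_sends_view_first: bool) {
	// Whichever side sends its view first, the end state must be the same.
	let _ = validator_sends_view_first;
}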
 
-/// Tests that collator can distribute up to `MAX_CANDIDATE_DEPTH + 1` candidates
-/// per relay parent.
+/// Tests that the collator respects the per-relay-parent limit of collations, which is equal
+/// to the number of assignments it has in the claim queue for that core.
 #[test]
 fn distribute_collation_up_to_limit() {
-	let test_state = TestState::default();
+	let mut test_state = TestState::default();
+	// The claim queue has 4 assignments for our para id on core 0 and 1 assignment for another
+	// para id on core 1. Let's replace one of our assignments on core 0.
+
+	*test_state.claim_queue.get_mut(&CoreIndex(0)).unwrap().get_mut(1).unwrap() = ParaId::from(3);
+	let expected_assignments = SCHEDULING_LOOKAHEAD - 1;
 
 	let local_peer_id = test_state.local_peer_id;
 	let collator_pair = test_state.collator_pair.clone();
@@ -405,15 +439,16 @@ fn distribute_collation_up_to_limit() {
 			overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id))
 				.await;
 			// Activated leaf is `a`, but the collation will be based on `b`.
-			update_view(virtual_overseer, vec![(head_a, head_a_num)], 1).await;
+			update_view(&test_state, virtual_overseer, vec![(head_a, head_a_num)], 1).await;
 
-			for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) {
+			for i in 0..expected_assignments {
 				let pov = PoV { block_data: BlockData(vec![i as u8]) };
 				let parent_head_data_hash = Hash::repeat_byte(0xAA);
 				let candidate = TestCandidateBuilder {
 					para_id: test_state.para_id,
 					relay_parent: head_b,
 					pov_hash: pov.hash(),
+					core_index: CoreIndex(0),
 					..Default::default()
 				}
 				.build();
@@ -435,6 +470,7 @@ fn distribute_collation_up_to_limit() {
 				para_id: test_state.para_id,
 				relay_parent: head_b,
 				pov_hash: pov.hash(),
+				core_index: CoreIndex(0),
 				..Default::default()
 			}
 			.build();
@@ -452,6 +488,35 @@ fn distribute_collation_up_to_limit() {
 			.await;
 
 			// Limit has been reached.
+			assert!(overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(100))
+				.await
+				.is_none());
+
+			// Let's also try on core 1, where we don't have any assignments.
+
+			let pov = PoV { block_data: BlockData(vec![10, 12, 6]) };
+			let parent_head_data_hash = Hash::repeat_byte(0xBB);
+			let candidate = TestCandidateBuilder {
+				para_id: test_state.para_id,
+				relay_parent: head_b,
+				pov_hash: pov.hash(),
+				core_index: CoreIndex(1),
+				..Default::default()
+			}
+			.build();
+			overseer_send(
+				virtual_overseer,
+				CollatorProtocolMessage::DistributeCollation {
+					candidate_receipt: candidate.clone(),
+					parent_head_data_hash,
+					pov: pov.clone(),
+					parent_head_data: HeadData(vec![1, 2, 3]),
+					result_sender: None,
+					core_index: CoreIndex(1),
+				},
+			)
+			.await;
+
 			assert!(overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(100))
 				.await
 				.is_none());
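// Sketch (hypothetical helper, not from the patch) of the limit this test
// enforces: the collation budget per relay parent is the number of claim-queue
// entries for our para on the targeted core. `CoreIndex` and `ParaId` are
// abstracted to plain integers here.
use std::collections::{BTreeMap, VecDeque};

fn assignments_for(claim_queue: &BTreeMap<u32, VecDeque<u32>>, core: u32, para: u32) -> usize {
	claim_queue
		.get(&core)
		.map_or(0, |queue| queue.iter().filter(|assigned| **assigned == para).count())
}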
@@ -476,7 +541,6 @@ fn send_parent_head_data_for_elastic_scaling() {
 		ReputationAggregator::new(|_| true),
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
-			let req_v1_cfg = test_harness.req_v1_cfg;
 			let mut req_v2_cfg = test_harness.req_v2_cfg;
 
 			let head_b = Hash::from_low_u64_be(129);
@@ -488,7 +552,7 @@ fn send_parent_head_data_for_elastic_scaling() {
 				CollatorProtocolMessage::CollateOn(test_state.para_id),
 			)
 			.await;
-			update_view(&mut virtual_overseer, vec![(head_b, head_b_num)], 1).await;
+			update_view(&test_state, &mut virtual_overseer, vec![(head_b, head_b_num)], 1).await;
 
 			let pov_data = PoV { block_data: BlockData(vec![1 as u8]) };
 			let candidate = TestCandidateBuilder {
@@ -522,12 +586,11 @@ fn send_parent_head_data_for_elastic_scaling() {
 				Some(validator_id.clone()),
 			)
 			.await;
-			expect_declare_msg_v2(&mut virtual_overseer, &test_state, &peer).await;
+			expect_declare_msg(&mut virtual_overseer, &test_state, &peer).await;
 
 			send_peer_view_change(&mut virtual_overseer, &peer, vec![head_b]).await;
 			let hashes: Vec<_> = vec![candidate.hash()];
-			expect_advertise_collation_msg(&mut virtual_overseer, &[peer], head_b, Some(hashes))
-				.await;
+			expect_advertise_collation_msg(&mut virtual_overseer, &[peer], head_b, hashes).await;
 
 			let (pending_response, rx) = oneshot::channel();
 			req_v2_cfg
@@ -536,7 +599,7 @@ fn send_parent_head_data_for_elastic_scaling() {
 				.unwrap()
 				.send(RawIncomingRequest {
 					peer,
-					payload: request_v2::CollationFetchingRequest {
+					payload: CollationFetchingRequest {
 						relay_parent: head_b,
 						para_id: test_state.para_id,
 						candidate_hash: candidate.hash(),
@@ -550,14 +613,14 @@ fn send_parent_head_data_for_elastic_scaling() {
 			assert_matches!(
 				rx.await,
 				Ok(full_response) => {
-					let response: request_v2::CollationFetchingResponse =
-						request_v2::CollationFetchingResponse::decode(&mut
-							full_response.result
+					let response: CollationFetchingResponse =
+						CollationFetchingResponse::decode(
+							&mut full_response.result
 							.expect("We should have a proper answer").as_ref()
 						).expect("Decoding should work");
 						assert_matches!(
 							response,
-							request_v1::CollationFetchingResponse::CollationWithParentHeadData {
+							CollationFetchingResponse::CollationWithParentHeadData {
 								receipt, pov, parent_head_data
 							} => {
 								assert_eq!(receipt, candidate);
@@ -568,7 +631,7 @@ fn send_parent_head_data_for_elastic_scaling() {
 				}
 			);
 
-			TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }
+			TestHarness { virtual_overseer, req_v2_cfg }
 		},
 	)
 }
@@ -587,7 +650,6 @@ fn advertise_and_send_collation_by_hash() {
 		ReputationAggregator::new(|_| true),
 		|test_harness| async move {
 			let mut virtual_overseer = test_harness.virtual_overseer;
-			let req_v1_cfg = test_harness.req_v1_cfg;
 			let mut req_v2_cfg = test_harness.req_v2_cfg;
 
 			let head_a = Hash::from_low_u64_be(128);
@@ -603,8 +665,8 @@ fn advertise_and_send_collation_by_hash() {
 				CollatorProtocolMessage::CollateOn(test_state.para_id),
 			)
 			.await;
-			update_view(&mut virtual_overseer, vec![(head_b, head_b_num)], 1).await;
-			update_view(&mut virtual_overseer, vec![(head_a, head_a_num)], 1).await;
+			update_view(&test_state, &mut virtual_overseer, vec![(head_b, head_b_num)], 1).await;
+			update_view(&test_state, &mut virtual_overseer, vec![(head_a, head_a_num)], 1).await;
 
 			let candidates: Vec<_> = (0..2)
 				.map(|i| {
@@ -641,13 +703,12 @@ fn advertise_and_send_collation_by_hash() {
 				Some(validator_id.clone()),
 			)
 			.await;
-			expect_declare_msg_v2(&mut virtual_overseer, &test_state, &peer).await;
+			expect_declare_msg(&mut virtual_overseer, &test_state, &peer).await;
 
 			// Head `b` is not a leaf, but both advertisements are still relevant.
 			send_peer_view_change(&mut virtual_overseer, &peer, vec![head_b]).await;
 			let hashes: Vec<_> = candidates.iter().map(|(candidate, _)| candidate.hash()).collect();
-			expect_advertise_collation_msg(&mut virtual_overseer, &[peer], head_b, Some(hashes))
-				.await;
+			expect_advertise_collation_msg(&mut virtual_overseer, &[peer], head_b, hashes).await;
 
 			for (candidate, pov_block) in candidates {
 				let (pending_response, rx) = oneshot::channel();
@@ -657,7 +718,7 @@ fn advertise_and_send_collation_by_hash() {
 					.unwrap()
 					.send(RawIncomingRequest {
 						peer,
-						payload: request_v2::CollationFetchingRequest {
+						payload: CollationFetchingRequest {
 							relay_parent: head_b,
 							para_id: test_state.para_id,
 							candidate_hash: candidate.hash(),
@@ -682,7 +743,7 @@ fn advertise_and_send_collation_by_hash() {
 				);
 			}
 
-			TestHarness { virtual_overseer, req_v1_cfg, req_v2_cfg }
+			TestHarness { virtual_overseer, req_v2_cfg }
 		},
 	)
 }
diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs
index 1edc67664172400503158f8a451756ca3674a3d9..79b5719ba3a448e6ce6e4bcac40a8924359eb59c 100644
--- a/polkadot/node/network/collator-protocol/src/lib.rs
+++ b/polkadot/node/network/collator-protocol/src/lib.rs
@@ -32,7 +32,7 @@ use polkadot_node_subsystem_util::reputation::ReputationAggregator;
 use sp_keystore::KeystorePtr;
 
 use polkadot_node_network_protocol::{
-	request_response::{v1 as request_v1, v2 as protocol_v2, IncomingRequestReceiver},
+	request_response::{v2 as protocol_v2, IncomingRequestReceiver},
 	PeerId, UnifiedReputationChange as Rep,
 };
 use polkadot_primitives::CollatorPair;
@@ -81,8 +81,6 @@ pub enum ProtocolSide {
 		peer_id: PeerId,
 		/// Parachain collator pair.
 		collator_pair: CollatorPair,
-		/// Receiver for v1 collation fetching requests.
-		request_receiver_v1: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
 		/// Receiver for v2 collation fetching requests.
 		request_receiver_v2: IncomingRequestReceiver<protocol_v2::CollationFetchingRequest>,
 		/// Metrics.
@@ -116,22 +114,10 @@ impl<Context> CollatorProtocolSubsystem {
 				validator_side::run(ctx, keystore, eviction_policy, metrics)
 					.map_err(|e| SubsystemError::with_origin("collator-protocol", e))
 					.boxed(),
-			ProtocolSide::Collator {
-				peer_id,
-				collator_pair,
-				request_receiver_v1,
-				request_receiver_v2,
-				metrics,
-			} => collator_side::run(
-				ctx,
-				peer_id,
-				collator_pair,
-				request_receiver_v1,
-				request_receiver_v2,
-				metrics,
-			)
-			.map_err(|e| SubsystemError::with_origin("collator-protocol", e))
-			.boxed(),
+			ProtocolSide::Collator { peer_id, collator_pair, request_receiver_v2, metrics } =>
+				collator_side::run(ctx, peer_id, collator_pair, request_receiver_v2, metrics)
+					.map_err(|e| SubsystemError::with_origin("collator-protocol", e))
+					.boxed(),
 			ProtocolSide::None => return DummySubsystem.start(ctx),
 		};
 
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs b/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs
index 3a34cf52fec6e897c854a022af6bf97107929d6a..d677da1ac4f03329bf77df2a85c355b96f8aeb9d 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/claim_queue_state.rs
@@ -155,7 +155,7 @@ impl ClaimQueueState {
 	fn get_window<'a>(
 		&'a mut self,
 		relay_parent: &'a Hash,
-	) -> impl Iterator<Item = &mut ClaimInfo> + 'a {
+	) -> impl Iterator<Item = &'a mut ClaimInfo> + 'a {
 		let mut window = self
 			.block_state
 			.iter_mut()
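// A self-contained sketch (not from the patch) of the lifetime fix above:
// naming the item lifetime ties the yielded `&mut` borrows to the `'a` borrow
// of `self`, which the elided `Item = &mut ClaimInfo` form left implicit.
struct Window {
	slots: Vec<u32>,
}

impl Window {
	// Mirrors the `get_window` signature shape: a named borrow of `self` and a
	// borrowed key, with the iterator's items tied to the same `'a`.
	fn tail_mut<'a>(&'a mut self, from: &'a usize) -> impl Iterator<Item = &'a mut u32> + 'a {
		let from = *from;
		self.slots.iter_mut().skip(from)
	}
}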
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index 5f5effcde9a8cd4f2d03a8b38da7591aa27fc2ba..93a8c31168c8980a6159121e9ab6d84fccbfb6d5 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -49,14 +49,14 @@ use polkadot_node_subsystem::{
 use polkadot_node_subsystem_util::{
 	backing_implicit_view::View as ImplicitView,
 	reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL},
-	request_async_backing_params, request_claim_queue, request_session_index_for_child,
-	runtime::{recv_runtime, request_node_features},
+	request_claim_queue, request_session_index_for_child,
+	runtime::request_node_features,
 };
 use polkadot_primitives::{
 	node_features,
 	vstaging::{CandidateDescriptorV2, CandidateDescriptorVersion},
-	AsyncBackingParams, CandidateHash, CollatorId, CoreIndex, Hash, HeadData, Id as ParaId,
-	OccupiedCoreAssumption, PersistedValidationData, SessionIndex,
+	CandidateHash, CollatorId, CoreIndex, Hash, HeadData, Id as ParaId, OccupiedCoreAssumption,
+	PersistedValidationData, SessionIndex,
 };
 
 use crate::error::{Error, FetchError, Result, SecondingError};
@@ -166,7 +166,7 @@ impl PeerData {
 	fn update_view(
 		&mut self,
 		implicit_view: &ImplicitView,
-		active_leaves: &HashMap<Hash, AsyncBackingParams>,
+		active_leaves: &HashSet<Hash>,
 		new_view: View,
 	) {
 		let old_view = std::mem::replace(&mut self.view, new_view);
@@ -191,7 +191,7 @@ impl PeerData {
 	fn prune_old_advertisements(
 		&mut self,
 		implicit_view: &ImplicitView,
-		active_leaves: &HashMap<Hash, AsyncBackingParams>,
+		active_leaves: &HashSet<Hash>,
 	) {
 		if let PeerState::Collating(ref mut peer_state) = self.state {
 			peer_state.advertisements.retain(|hash, _| {
@@ -215,7 +215,7 @@ impl PeerData {
 		on_relay_parent: Hash,
 		candidate_hash: Option<CandidateHash>,
 		implicit_view: &ImplicitView,
-		active_leaves: &HashMap<Hash, AsyncBackingParams>,
+		active_leaves: &HashSet<Hash>,
 		per_relay_parent: &PerRelayParent,
 	) -> std::result::Result<(CollatorId, ParaId), InsertAdvertisementError> {
 		match self.state {
@@ -365,10 +365,10 @@ struct State {
 	/// ancestry of some active leaf, then it does support prospective parachains.
 	implicit_view: ImplicitView,
 
-	/// All active leaves observed by us. This mapping works as a replacement for
+	/// All active leaves observed by us. This works as a replacement for
 	/// [`polkadot_node_network_protocol::View`] and can be dropped once the transition
 	/// to asynchronous backing is done.
-	active_leaves: HashMap<Hash, AsyncBackingParams>,
+	active_leaves: HashSet<Hash>,
 
 	/// State tracked per relay parent.
 	per_relay_parent: HashMap<Hash, PerRelayParent>,
@@ -465,10 +465,10 @@ impl State {
 fn is_relay_parent_in_implicit_view(
 	relay_parent: &Hash,
 	implicit_view: &ImplicitView,
-	active_leaves: &HashMap<Hash, AsyncBackingParams>,
+	active_leaves: &HashSet<Hash>,
 	para_id: ParaId,
 ) -> bool {
-	active_leaves.iter().any(|(hash, _)| {
+	active_leaves.iter().any(|hash| {
 		implicit_view
 			.known_allowed_relay_parents_under(hash, Some(para_id))
 			.unwrap_or_default()
@@ -1118,8 +1118,7 @@ where
 {
 	let peer_data = state.peer_data.get_mut(&peer_id).ok_or(AdvertisementError::UnknownPeer)?;
 
-	if peer_data.version == CollationVersion::V1 && !state.active_leaves.contains_key(&relay_parent)
-	{
+	if peer_data.version == CollationVersion::V1 && !state.active_leaves.contains(&relay_parent) {
 		return Err(AdvertisementError::ProtocolMisuse)
 	}
 
@@ -1274,8 +1273,8 @@ where
 {
 	let current_leaves = state.active_leaves.clone();
 
-	let removed = current_leaves.iter().filter(|(h, _)| !view.contains(h));
-	let added = view.iter().filter(|h| !current_leaves.contains_key(h));
+	let removed = current_leaves.iter().filter(|h| !view.contains(h));
+	let added = view.iter().filter(|h| !current_leaves.contains(h));
 
 	for leaf in added {
 		let session_index = request_session_index_for_child(*leaf, sender)
@@ -1283,9 +1282,6 @@ where
 			.await
 			.map_err(Error::CancelledSessionIndex)??;
 
-		let async_backing_params =
-			recv_runtime(request_async_backing_params(*leaf, sender).await).await?;
-
 		let v2_receipts = request_node_features(*leaf, session_index, sender)
 			.await?
 			.unwrap_or_default()
@@ -1306,7 +1302,7 @@ where
 			continue
 		};
 
-		state.active_leaves.insert(*leaf, async_backing_params);
+		state.active_leaves.insert(*leaf);
 		state.per_relay_parent.insert(*leaf, per_relay_parent);
 
 		state
@@ -1340,7 +1336,7 @@ where
 		}
 	}
 
-	for (removed, _) in removed {
+	for removed in removed {
 		gum::trace!(
 			target: LOG_TARGET,
 			?view,
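// Sketch (not from the patch, with hashes abstracted to `u64`) of the
// data-structure simplification in this file: with `AsyncBackingParams` gone,
// per-leaf tracking needs only membership, so the
// `HashMap<Hash, AsyncBackingParams>` collapses into a `HashSet<Hash>` and the
// `(hash, _)` tuple patterns above become plain items.
use std::collections::HashSet;

fn track_leaf(active_leaves: &mut HashSet<u64>, leaf: u64) -> bool {
	// `insert` returns false when the leaf was already tracked.
	active_leaves.insert(leaf)
}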
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
index 5a2e135419dd2645f7efbea29ba86fc3441d4858..308aec578dee0ddc26f664f747ebf58ae5080a1c 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
@@ -41,9 +41,9 @@ use polkadot_node_subsystem::messages::{
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt};
 use polkadot_primitives::{
-	node_features, vstaging::CandidateReceiptV2 as CandidateReceipt, AsyncBackingParams,
-	CollatorPair, CoreIndex, GroupRotationInfo, HeadData, NodeFeatures, PersistedValidationData,
-	ValidatorId, ValidatorIndex,
+	node_features, vstaging::CandidateReceiptV2 as CandidateReceipt, CollatorPair, CoreIndex,
+	GroupRotationInfo, HeadData, NodeFeatures, PersistedValidationData, ValidatorId,
+	ValidatorIndex,
 };
 use polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_hash};
 
@@ -71,7 +71,7 @@ struct TestState {
 	validator_groups: Vec<Vec<ValidatorIndex>>,
 	group_rotation_info: GroupRotationInfo,
 	claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>,
-	async_backing_params: AsyncBackingParams,
+	scheduling_lookahead: u32,
 	node_features: NodeFeatures,
 	session_index: SessionIndex,
 	// Used by `update_view` to keep track of the latest requested ancestor
@@ -101,18 +101,19 @@ impl Default for TestState {
 		let group_rotation_info =
 			GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 1, now: 0 };
 
+		let scheduling_lookahead = 3;
 		let mut claim_queue = BTreeMap::new();
 		claim_queue.insert(
 			CoreIndex(0),
 			iter::repeat(ParaId::from(Self::CHAIN_IDS[0]))
-				.take(Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize)
+				.take(scheduling_lookahead as usize)
 				.collect(),
 		);
 		claim_queue.insert(CoreIndex(1), VecDeque::new());
 		claim_queue.insert(
 			CoreIndex(2),
 			iter::repeat(ParaId::from(Self::CHAIN_IDS[1]))
-				.take(Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize)
+				.take(scheduling_lookahead as usize)
 				.collect(),
 		);
 
@@ -128,7 +129,7 @@ impl Default for TestState {
 			validator_groups,
 			group_rotation_info,
 			claim_queue,
-			async_backing_params: Self::ASYNC_BACKING_PARAMS,
+			scheduling_lookahead,
 			node_features,
 			session_index: 1,
 			last_known_block: None,
@@ -138,8 +139,6 @@ impl Default for TestState {
 
 impl TestState {
 	const CHAIN_IDS: [u32; 2] = [1, 2];
-	const ASYNC_BACKING_PARAMS: AsyncBackingParams =
-		AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
 
 	fn with_shared_core() -> Self {
 		let mut state = Self::default();
@@ -159,8 +158,7 @@ impl TestState {
 		state.validator_groups.truncate(1);
 
 		assert!(
-			claim_queue.get(&CoreIndex(0)).unwrap().len() ==
-				Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize
+			claim_queue.get(&CoreIndex(0)).unwrap().len() == state.scheduling_lookahead as usize
 		);
 
 		state.claim_queue = claim_queue;
@@ -187,8 +185,7 @@ impl TestState {
 		);
 
 		assert!(
-			claim_queue.get(&CoreIndex(0)).unwrap().len() ==
-				Self::ASYNC_BACKING_PARAMS.allowed_ancestry_len as usize
+			claim_queue.get(&CoreIndex(0)).unwrap().len() == state.scheduling_lookahead as usize
 		);
 
 		state.validator_groups = validator_groups;
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
index fac63aeb2097f745e46810d941e7f6e52a7034e7..0a00fb6f7b783d028187963ef1721561332acc21 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
@@ -108,16 +108,6 @@ pub(super) async fn update_view(
 			}
 		);
 
-		assert_matches!(
-			overseer_recv(virtual_overseer).await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-				_,
-				RuntimeApiRequest::AsyncBackingParams(tx),
-			)) => {
-				tx.send(Ok(test_state.async_backing_params)).unwrap();
-			}
-		);
-
 		assert_matches!(
 			overseer_recv(virtual_overseer).await,
 			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
@@ -137,8 +127,7 @@ pub(super) async fn update_view(
 		)
 		.await;
 
-		let min_number =
-			leaf_number.saturating_sub(test_state.async_backing_params.allowed_ancestry_len);
+		let min_number = leaf_number.saturating_sub(test_state.scheduling_lookahead);
 
 		let ancestry_len = leaf_number + 1 - min_number;
 		let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h)))
@@ -730,8 +719,7 @@ fn second_multiple_candidates_per_relay_parent() {
 		)
 		.await;
 
-		// `allowed_ancestry_len` equals the size of the claim queue
-		for i in 0..test_state.async_backing_params.allowed_ancestry_len {
+		for i in 0..test_state.scheduling_lookahead {
 			submit_second_and_assert(
 				&mut virtual_overseer,
 				keystore.clone(),
@@ -2189,8 +2177,7 @@ fn claims_below_are_counted_correctly() {
 		),
 	);
 	test_state.claim_queue = claim_queue;
-	test_state.async_backing_params.max_candidate_depth = 3;
-	test_state.async_backing_params.allowed_ancestry_len = 2;
+	test_state.scheduling_lookahead = 2;
 
 	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
 		let TestHarness { mut virtual_overseer, keystore } = test_harness;
@@ -2280,8 +2267,7 @@ fn claims_above_are_counted_correctly() {
 		),
 	);
 	test_state.claim_queue = claim_queue;
-	test_state.async_backing_params.max_candidate_depth = 3;
-	test_state.async_backing_params.allowed_ancestry_len = 2;
+	test_state.scheduling_lookahead = 2;
 
 	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
 		let TestHarness { mut virtual_overseer, keystore } = test_harness;
@@ -2386,8 +2372,7 @@ fn claim_fills_last_free_slot() {
 		),
 	);
 	test_state.claim_queue = claim_queue;
-	test_state.async_backing_params.max_candidate_depth = 3;
-	test_state.async_backing_params.allowed_ancestry_len = 2;
+	test_state.scheduling_lookahead = 2;
 
 	test_harness(ReputationAggregator::new(|_| true), |test_harness| async move {
 		let TestHarness { mut virtual_overseer, keystore } = test_harness;
diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs
index bd6d4ebe755cde4d86be23a8cc24483741208fe9..06e52dbe3a4572b1511ff7046511f84294f75895 100644
--- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs
@@ -13,7 +13,7 @@
 
 // You should have received a copy of the GNU General Public License
 // along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.
-
+#![allow(dead_code)]
 use codec::Encode;
 use net_protocol::{filter_by_peer_version, peer_set::ProtocolVersion};
 
diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
index d2fd016ec2f1ef6408590d8cf4b3368442d03499..92acfa870154f58f34884215e00af56fc38b7045 100644
--- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
+++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
@@ -20,51 +20,27 @@ use super::*;
 use crate::{metrics::Metrics, *};
 
 use assert_matches::assert_matches;
-use codec::{Decode, Encode};
 use futures::executor;
-use futures_timer::Delay;
 use polkadot_node_network_protocol::{
-	grid_topology::{SessionGridTopology, TopologyPeerInfo},
-	peer_set::ValidationVersion,
-	request_response::{
-		v1::{StatementFetchingRequest, StatementFetchingResponse},
-		IncomingRequest, Recipient, ReqProtocolNames, Requests,
-	},
-	view, ObservedRole, VersionedValidationProtocol,
+	peer_set::ValidationVersion, view, VersionedValidationProtocol,
 };
 use polkadot_node_primitives::{
 	SignedFullStatementWithPVD, Statement, UncheckedSignedFullStatement,
 };
-use polkadot_node_subsystem::{
-	messages::{
-		network_bridge_event, AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest,
-	},
-	RuntimeApiError,
-};
-use polkadot_node_subsystem_test_helpers::mock::{make_ferdie_keystore, new_leaf};
-use polkadot_primitives::{
-	Block, ExecutorParams, GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, NodeFeatures,
-	SessionInfo, ValidationCode,
-};
+use polkadot_node_subsystem::messages::AllMessages;
+use polkadot_primitives::{GroupIndex, Hash, HeadData, Id as ParaId, IndexedVec, SessionInfo};
 use polkadot_primitives_test_helpers::{
-	dummy_committed_candidate_receipt, dummy_committed_candidate_receipt_v2, dummy_hash,
-	AlwaysZeroRng,
+	dummy_committed_candidate_receipt, dummy_hash, AlwaysZeroRng,
 };
 use sc_keystore::LocalKeystore;
-use sc_network::ProtocolName;
 use sp_application_crypto::{sr25519::Pair, AppCrypto, Pair as TraitPair};
-use sp_authority_discovery::AuthorityPair;
 use sp_keyring::Sr25519Keyring;
 use sp_keystore::{Keystore, KeystorePtr};
-use std::{sync::Arc, time::Duration};
-use util::reputation::add_reputation;
+use std::sync::Arc;
 
 // Some deterministic genesis hash for protocol names
 const GENESIS_HASH: Hash = Hash::repeat_byte(0xff);
 
-const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError =
-	RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" };
-
 fn dummy_pvd() -> PersistedValidationData {
 	PersistedValidationData {
 		parent_head: HeadData(vec![7, 8, 9]),
@@ -737,2320 +713,6 @@ fn circulated_statement_goes_to_all_peers_with_view() {
 	});
 }
 
-#[test]
-fn receiving_from_one_sends_to_another_and_to_candidate_backing() {
-	const PARA_ID: ParaId = ParaId::new(1);
-	let hash_a = Hash::repeat_byte(1);
-	let pvd = dummy_pvd();
-
-	let candidate = {
-		let mut c = dummy_committed_candidate_receipt(dummy_hash());
-		c.descriptor.relay_parent = hash_a;
-		c.descriptor.para_id = PARA_ID;
-		c.into()
-	};
-
-	let peer_a = PeerId::random();
-	let peer_b = PeerId::random();
-
-	let validators = vec![
-		Sr25519Keyring::Alice.pair(),
-		Sr25519Keyring::Bob.pair(),
-		Sr25519Keyring::Charlie.pair(),
-	];
-
-	let session_info = make_session_info(validators, vec![]);
-
-	let session_index = 1;
-
-	let pool = sp_core::testing::TaskExecutor::new();
-	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
-	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-
-	let bg = async move {
-		let s = StatementDistributionSubsystem {
-			keystore: Arc::new(LocalKeystore::in_memory()),
-			v1_req_receiver: Some(statement_req_receiver),
-			req_receiver: Some(candidate_req_receiver),
-			metrics: Default::default(),
-			rng: AlwaysZeroRng,
-			reputation: ReputationAggregator::new(|_| true),
-		};
-		s.run(ctx).await.unwrap();
-	};
-
-	let test_fut = async move {
-		// register our active heads.
-		handle
-			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
-				ActiveLeavesUpdate::start_work(new_leaf(hash_a, 1)),
-			)))
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Ok(session_index));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(session_info)));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionExecutorParams(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(ExecutorParams::default())));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), )
-			) => {
-				si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-			}
-		);
-
-		// notify of peers and view
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_a,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						None,
-					),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_b,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						None,
-					),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a]),
-				),
-			})
-			.await;
-
-		// receive a seconded statement from peer A. it should be propagated onwards to peer B and
-		// to candidate backing.
-		let statement = {
-			let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
-			let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory());
-			let alice_public = Keystore::sr25519_generate_new(
-				&*keystore,
-				ValidatorId::ID,
-				Some(&Sr25519Keyring::Alice.to_seed()),
-			)
-			.unwrap();
-
-			SignedFullStatement::sign(
-				&keystore,
-				Statement::Seconded(candidate),
-				&signing_context,
-				ValidatorIndex(0),
-				&alice_public.into(),
-			)
-			.ok()
-			.flatten()
-			.expect("should be signed")
-		};
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_a,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
-							hash_a,
-							statement.clone().into(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone());
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-				hash,
-				RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx),
-			)) if para_id == PARA_ID &&
-				assumption == OccupiedCoreAssumption::Free &&
-				hash == hash_a =>
-			{
-				tx.send(Ok(Some(pvd))).unwrap();
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST.into() => {}
-		);
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::Statement(r, s)
-			) if r == hash_a && s == statement_with_pvd => {}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendValidationMessage(
-					recipients,
-					Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
-						protocol_v1::StatementDistributionMessage::Statement(r, s)
-					)),
-				)
-			) => {
-				assert_eq!(recipients, vec![peer_b]);
-				assert_eq!(r, hash_a);
-				assert_eq!(s, statement.into());
-			}
-		);
-		handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
-	};
-
-	futures::pin_mut!(test_fut);
-	futures::pin_mut!(bg);
-
-	executor::block_on(future::join(test_fut, bg));
-}
-
-#[test]
-fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing() {
-	const PARA_ID: ParaId = ParaId::new(1);
-	let pvd = dummy_pvd();
-
-	sp_tracing::try_init_simple();
-	let hash_a = Hash::repeat_byte(1);
-	let hash_b = Hash::repeat_byte(2);
-
-	let candidate = {
-		let mut c = dummy_committed_candidate_receipt(dummy_hash());
-		c.descriptor.relay_parent = hash_a;
-		c.descriptor.para_id = PARA_ID;
-		c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
-		c
-	};
-
-	let peer_a = PeerId::random(); // Alice
-	let peer_b = PeerId::random(); // Bob
-	let peer_c = PeerId::random(); // Charlie
-	let peer_bad = PeerId::random(); // No validator
-
-	let validators = vec![
-		Sr25519Keyring::Alice.pair(),
-		Sr25519Keyring::Bob.pair(),
-		Sr25519Keyring::Charlie.pair(),
-		// We:
-		Sr25519Keyring::Ferdie.pair(),
-	];
-
-	let session_info = make_session_info(validators, vec![vec![0, 1, 2, 4], vec![3]]);
-
-	let session_index = 1;
-
-	let pool = sp_core::testing::TaskExecutor::new();
-	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
-	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-
-	let bg = async move {
-		let s = StatementDistributionSubsystem {
-			keystore: make_ferdie_keystore(),
-			v1_req_receiver: Some(statement_req_receiver),
-			req_receiver: Some(candidate_req_receiver),
-			metrics: Default::default(),
-			rng: AlwaysZeroRng,
-			reputation: ReputationAggregator::new(|_| true),
-		};
-		s.run(ctx).await.unwrap();
-	};
-
-	let test_fut = async move {
-		// register our active heads.
-		handle
-			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
-				ActiveLeavesUpdate::start_work(new_leaf(hash_a, 1)),
-			)))
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Ok(session_index));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(session_info)));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionExecutorParams(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(ExecutorParams::default())));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), )
-			) => {
-				si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-			}
-		);
-
-		// notify of peers and view
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_a,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
-					),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_b,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Bob.public().into()])),
-					),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_c,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])),
-					),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_bad,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						None,
-					),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a]),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_c, view![hash_a]),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_bad, view![hash_a]),
-				),
-			})
-			.await;
-
-		// receive a seconded statement from peer A, which does not provide the request data,
-		// then get that data from peer C. It should be propagated onwards to peer B and to
-		// candidate backing.
-		let statement = {
-			let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
-			let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory());
-			let alice_public = Keystore::sr25519_generate_new(
-				&*keystore,
-				ValidatorId::ID,
-				Some(&Sr25519Keyring::Alice.to_seed()),
-			)
-			.unwrap();
-
-			SignedFullStatement::sign(
-				&keystore,
-				Statement::Seconded(candidate.clone().into()),
-				&signing_context,
-				ValidatorIndex(0),
-				&alice_public.into(),
-			)
-			.ok()
-			.flatten()
-			.expect("should be signed")
-		};
-
-		let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_a,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
-							metadata.clone(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
-				// Just drop request - should trigger error.
-			}
-		);
-
-		// There is a race between the request handler asking for more peers and the
-		// processing of the incoming `PeerMessage`s; we want the request handler to ask
-		// first here for better test coverage:
-		Delay::new(Duration::from_millis(20)).await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_c,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
-							metadata.clone(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		// Malicious peer:
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_bad,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
-							metadata.clone(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		// Let c fail once too:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
-			}
-		);
-
-		// a fails again:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				// On retry, we should have reverse order:
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
-			}
-		);
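
The reverse order asserted here and below comes from how the requester walks the peers that advertised a large statement. A hedged sketch of that behaviour, treating the known-peers list as a stack (illustrative only, not the requester's real data structure):

```rust
/// Illustrative retry order: peers that advertised the statement are kept on
/// a stack, so a fresh pass over them runs in reverse order of the last one.
fn retry_pass(previous_pass: &[&'static str]) -> Vec<&'static str> {
	let mut stack: Vec<&'static str> = previous_pass.to_vec();
	let mut next_pass = Vec::new();
	while let Some(peer) = stack.pop() {
		next_pass.push(peer);
	}
	next_pass
}

fn main() {
	// If the last pass tried peer_c and then peer_a, the retry pass goes
	// peer_a and then peer_c.
	assert_eq!(retry_pass(&["peer_c", "peer_a"]), vec!["peer_a", "peer_c"]);
}
```
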
-
-		// Send invalid response (all other peers have been tried now):
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_bad));
-				let bad_candidate = {
-					let mut bad = candidate.clone();
-					bad.descriptor.para_id = 0xeadbeaf.into();
-					bad.into()
-				};
-				let response = StatementFetchingResponse::Statement(bad_candidate);
-				outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap();
-			}
-		);
-
-		// Should get punished and never tried again:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) if p == peer_bad && r == COST_WRONG_HASH.into() => {}
-		);
-
-		// a is tried again (retried in reverse order):
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				// On retry, we should have reverse order:
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
-			}
-		);
-
-		// c succeeds now:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				// On retry, we should have reverse order:
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
-				let response = StatementFetchingResponse::Statement(candidate.clone().into());
-				outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap();
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) if p == peer_a && r == COST_FETCH_FAIL.into() => {}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() => {}
-		);
-
-		let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone());
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-				hash,
-				RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx),
-			)) if para_id == PARA_ID &&
-				assumption == OccupiedCoreAssumption::Free &&
-				hash == hash_a =>
-			{
-				tx.send(Ok(Some(pvd))).unwrap();
-			}
-		);
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST.into() => {}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::Statement(r, s)
-			) if r == hash_a && s == statement_with_pvd => {}
-		);
-
-		// Now messages should go out:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendValidationMessage(
-					mut recipients,
-					Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
-						protocol_v1::StatementDistributionMessage::LargeStatement(meta)
-					)),
-				)
-			) => {
-				gum::debug!(
-					target: LOG_TARGET,
-					?recipients,
-					"Recipients received"
-				);
-				recipients.sort();
-				let mut expected = vec![peer_b, peer_c, peer_bad];
-				expected.sort();
-				assert_eq!(recipients, expected);
-				assert_eq!(meta.relay_parent, hash_a);
-				assert_eq!(meta.candidate_hash, statement.payload().candidate_hash());
-				assert_eq!(meta.signed_by, statement.validator_index());
-				assert_eq!(&meta.signature, statement.signature());
-			}
-		);
-
-		// Now that it has the candidate it should answer requests accordingly (even after a
-		// failed request):
-
-		// Failing request first (wrong relay parent hash):
-		let (pending_response, response_rx) = oneshot::channel();
-		let inner_req = StatementFetchingRequest {
-			relay_parent: hash_b,
-			candidate_hash: metadata.candidate_hash,
-		};
-		let req = sc_network::config::IncomingRequest {
-			peer: peer_b,
-			payload: inner_req.encode(),
-			pending_response,
-		};
-		req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
-		assert_matches!(
-			response_rx.await.unwrap().result,
-			Err(()) => {}
-		);
-
-		// Another failing request (peer_a never received a statement from us, so it is not
-		// allowed to request the data):
-		let (pending_response, response_rx) = oneshot::channel();
-		let inner_req = StatementFetchingRequest {
-			relay_parent: metadata.relay_parent,
-			candidate_hash: metadata.candidate_hash,
-		};
-		let req = sc_network::config::IncomingRequest {
-			peer: peer_a,
-			payload: inner_req.encode(),
-			pending_response,
-		};
-		req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
-		assert_matches!(
-			response_rx.await.unwrap().result,
-			Err(()) => {}
-		);
-
-		// And now the succeeding request from peer_b:
-		let (pending_response, response_rx) = oneshot::channel();
-		let inner_req = StatementFetchingRequest {
-			relay_parent: metadata.relay_parent,
-			candidate_hash: metadata.candidate_hash,
-		};
-		let req = sc_network::config::IncomingRequest {
-			peer: peer_b,
-			payload: inner_req.encode(),
-			pending_response,
-		};
-		req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
-		let StatementFetchingResponse::Statement(committed) =
-			Decode::decode(&mut response_rx.await.unwrap().result.unwrap().as_ref()).unwrap();
-		assert_eq!(committed, candidate.into());
-
-		handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
-	};
-
-	futures::pin_mut!(test_fut);
-	futures::pin_mut!(bg);
-
-	executor::block_on(future::join(test_fut, bg));
-}
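
This test exercises the large-statement path: anything over the size limit is gossiped as metadata only, and the full committed candidate travels over the `StatementFetchingV1` request/response protocol, with retries across every peer that advertised it. A rough sketch of the inline-versus-metadata split, assuming an illustrative threshold (the real limit and wire types differ):

```rust
/// Assumed size limit, for illustration only; the real threshold differs.
const LARGE_STATEMENT_THRESHOLD: usize = 4096;

enum GossipPayload {
	/// Small statements travel inline in the gossip message.
	Inline(Vec<u8>),
	/// Large statements are announced by metadata only; receivers must fetch
	/// the full committed candidate over the request/response protocol.
	LargeMetadata { relay_parent: [u8; 32], candidate_hash: [u8; 32] },
}

fn to_gossip(
	encoded_statement: Vec<u8>,
	relay_parent: [u8; 32],
	candidate_hash: [u8; 32],
) -> GossipPayload {
	if encoded_statement.len() <= LARGE_STATEMENT_THRESHOLD {
		GossipPayload::Inline(encoded_statement)
	} else {
		GossipPayload::LargeMetadata { relay_parent, candidate_hash }
	}
}

fn main() {
	let small = to_gossip(vec![0u8; 16], [1; 32], [2; 32]);
	assert!(matches!(small, GossipPayload::Inline(_)));
	let large = to_gossip(vec![0u8; 1_000_000], [1; 32], [2; 32]);
	assert!(matches!(large, GossipPayload::LargeMetadata { .. }));
}
```
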
-
-#[test]
-fn delay_reputation_changes() {
-	sp_tracing::try_init_simple();
-	let hash_a = Hash::repeat_byte(1);
-	let pvd = dummy_pvd();
-
-	let candidate = {
-		let mut c = dummy_committed_candidate_receipt(dummy_hash());
-		c.descriptor.relay_parent = hash_a;
-		c.descriptor.para_id = 1.into();
-		c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
-		c
-	};
-
-	let peer_a = PeerId::random(); // Alice
-	let peer_b = PeerId::random(); // Bob
-	let peer_c = PeerId::random(); // Charlie
-	let peer_bad = PeerId::random(); // No validator
-
-	let validators = vec![
-		Sr25519Keyring::Alice.pair(),
-		Sr25519Keyring::Bob.pair(),
-		Sr25519Keyring::Charlie.pair(),
-		// We:
-		Sr25519Keyring::Ferdie.pair(),
-	];
-
-	let session_info = make_session_info(validators, vec![vec![0, 1, 2, 4], vec![3]]);
-
-	let session_index = 1;
-
-	let pool = sp_core::testing::TaskExecutor::new();
-	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
-	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-
-	let reputation_interval = Duration::from_millis(100);
-
-	let bg = async move {
-		let s = StatementDistributionSubsystem {
-			keystore: make_ferdie_keystore(),
-			v1_req_receiver: Some(statement_req_receiver),
-			req_receiver: Some(candidate_req_receiver),
-			metrics: Default::default(),
-			rng: AlwaysZeroRng,
-			reputation: ReputationAggregator::new(|_| false),
-		};
-		s.run_inner(ctx, reputation_interval).await.unwrap();
-	};
-
-	let test_fut = async move {
-		// register our active heads.
-		handle
-			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
-				ActiveLeavesUpdate::start_work(new_leaf(hash_a, 1)),
-			)))
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Ok(session_index));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(session_info)));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionExecutorParams(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(ExecutorParams::default())));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), )
-			) => {
-				si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-			}
-		);
-
-		// notify of peers and view
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_a,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
-					),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_b,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Bob.public().into()])),
-					),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_c,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])),
-					),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_bad,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						None,
-					),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a]),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_c, view![hash_a]),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_bad, view![hash_a]),
-				),
-			})
-			.await;
-
-		// receive a seconded statement from peer A, which does not provide the request data,
-		// then get that data from peer C. It should be propagated onwards to peer B and to
-		// candidate backing.
-		let statement = {
-			let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
-			let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory());
-			let alice_public = Keystore::sr25519_generate_new(
-				&*keystore,
-				ValidatorId::ID,
-				Some(&Sr25519Keyring::Alice.to_seed()),
-			)
-			.unwrap();
-
-			SignedFullStatement::sign(
-				&keystore,
-				Statement::Seconded(candidate.clone().into()),
-				&signing_context,
-				ValidatorIndex(0),
-				&alice_public.into(),
-			)
-			.ok()
-			.flatten()
-			.expect("should be signed")
-		};
-
-		let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_a,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
-							metadata.clone(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
-				// Just drop request - should trigger error.
-			}
-		);
-
-		// There is a race between the request handler asking for more peers and the
-		// processing of the incoming `PeerMessage`s; we want the request handler to ask
-		// first here for better test coverage:
-		Delay::new(Duration::from_millis(20)).await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_c,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
-							metadata.clone(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		// Malicious peer:
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_bad,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(
-							metadata.clone(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		// Let c fail once too:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
-			}
-		);
-
-		// a fails again:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				// On retry, we should have reverse order:
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
-			}
-		);
-
-		// Send invalid response (all other peers have been tried now):
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_bad));
-				let bad_candidate = {
-					let mut bad = candidate.clone();
-					bad.descriptor.para_id = 0xeadbeaf.into();
-					bad
-				};
-				let response = StatementFetchingResponse::Statement(bad_candidate.into());
-				outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap();
-			}
-		);
-
-		// a is tried again (retried in reverse order):
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				// On retry, we should have reverse order:
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
-			}
-		);
-
-		// c succeeds now:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendRequests(
-					mut reqs, IfDisconnected::ImmediateError
-				)
-			) => {
-				let reqs = reqs.pop().unwrap();
-				let outgoing = match reqs {
-					Requests::StatementFetchingV1(outgoing) => outgoing,
-					_ => panic!("Unexpected request"),
-				};
-				let req = outgoing.payload;
-				assert_eq!(req.relay_parent, metadata.relay_parent);
-				assert_eq!(req.candidate_hash, metadata.candidate_hash);
-				// On retry, we should have reverse order:
-				assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
-				let response = StatementFetchingResponse::Statement(candidate.clone().into());
-				outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap();
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-				hash,
-				RuntimeApiRequest::PersistedValidationData(_, assumption, tx),
-			)) if assumption == OccupiedCoreAssumption::Free && hash == hash_a =>
-			{
-				tx.send(Ok(Some(pvd))).unwrap();
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::CandidateBacking(CandidateBackingMessage::Statement(..))
-		);
-
-		// Now messages should go out:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendValidationMessage(
-					mut recipients,
-					Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
-						protocol_v1::StatementDistributionMessage::LargeStatement(meta)
-					)),
-				)
-			) => {
-				gum::debug!(
-					target: LOG_TARGET,
-					?recipients,
-					"Recipients received"
-				);
-				recipients.sort();
-				let mut expected = vec![peer_b, peer_c, peer_bad];
-				expected.sort();
-				assert_eq!(recipients, expected);
-				assert_eq!(meta.relay_parent, hash_a);
-				assert_eq!(meta.candidate_hash, statement.payload().candidate_hash());
-				assert_eq!(meta.signed_by, statement.validator_index());
-				assert_eq!(&meta.signature, statement.signature());
-			}
-		);
-
-		// Wait long enough for the reputation aggregation interval to fire
-		futures_timer::Delay::new(reputation_interval).await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Batch(v))
-			) => {
-				let mut expected_change = HashMap::new();
-				for rep in vec![COST_FETCH_FAIL, BENEFIT_VALID_STATEMENT_FIRST] {
-					add_reputation(&mut expected_change, peer_a, rep)
-				}
-				for rep in vec![BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT] {
-					add_reputation(&mut expected_change, peer_c, rep)
-				}
-				for rep in vec![COST_WRONG_HASH, BENEFIT_VALID_STATEMENT] {
-					add_reputation(&mut expected_change, peer_bad, rep)
-				}
-				assert_eq!(v, expected_change);
-			}
-		);
-
-		handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
-	};
-
-	futures::pin_mut!(test_fut);
-	futures::pin_mut!(bg);
-
-	executor::block_on(future::join(test_fut, bg));
-}
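
`delay_reputation_changes` differs from the earlier tests only in how the aggregator is constructed: with `|_| false` no change qualifies for immediate sending, so everything is accumulated per peer and flushed as a single `Batch` once `reputation_interval` elapses. A minimal sketch of that accumulate-then-flush pattern, with illustrative types and values standing in for the real `ReputationAggregator`:

```rust
use std::collections::HashMap;

struct Aggregator {
	by_peer: HashMap<&'static str, i32>,
}

impl Aggregator {
	fn new() -> Self {
		Self { by_peer: HashMap::new() }
	}

	/// Record a change instead of sending it immediately.
	fn modify(&mut self, peer: &'static str, change: i32) {
		*self.by_peer.entry(peer).or_insert(0) += change;
	}

	/// Called on a timer (`reputation_interval` in the test): drain everything
	/// accumulated so far into one batched report.
	fn flush(&mut self) -> HashMap<&'static str, i32> {
		std::mem::take(&mut self.by_peer)
	}
}

fn main() {
	let mut agg = Aggregator::new();
	agg.modify("peer_a", -100); // e.g. a fetch-failure cost
	agg.modify("peer_a", 50); // e.g. a valid-statement benefit
	let batch = agg.flush();
	// One net change per peer, exactly what the `Batch` assertion checks.
	assert_eq!(batch["peer_a"], -50);
	assert!(agg.flush().is_empty());
}
```
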
-
-#[test]
-fn share_prioritizes_backing_group() {
-	sp_tracing::try_init_simple();
-	let hash_a = Hash::repeat_byte(1);
-
-	let candidate = {
-		let mut c = dummy_committed_candidate_receipt(dummy_hash());
-		c.descriptor.relay_parent = hash_a;
-		c.descriptor.para_id = 1.into();
-		c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
-		c
-	};
-
-	let peer_a = PeerId::random(); // Alice
-	let peer_b = PeerId::random(); // Bob
-	let peer_c = PeerId::random(); // Charlie
-	let peer_bad = PeerId::random(); // No validator
-	let peer_other_group = PeerId::random(); // Dave
-
-	let mut validators = vec![
-		Sr25519Keyring::Alice.pair(),
-		Sr25519Keyring::Bob.pair(),
-		Sr25519Keyring::Charlie.pair(),
-		// other group
-		Sr25519Keyring::Dave.pair(),
-		// We:
-		Sr25519Keyring::Ferdie.pair(),
-	];
-
-	// Strictly speaking we only need MIN_GOSSIP_PEERS - 3 to make sure only priority peers
-	// will be served, but by using a larger value we test for overflow errors:
-	let dummy_count = MIN_GOSSIP_PEERS;
-
-	// We artificially inflate our group, so there won't be any free slots for other peers. (We
-	// want to test that our group is prioritized):
-	let dummy_pairs: Vec<_> =
-		std::iter::repeat_with(|| Pair::generate().0).take(dummy_count).collect();
-	let dummy_peers: Vec<_> =
-		std::iter::repeat_with(|| PeerId::random()).take(dummy_count).collect();
-
-	validators = validators.into_iter().chain(dummy_pairs.clone()).collect();
-
-	let mut first_group = vec![0, 1, 2, 4];
-	first_group.append(&mut (0..dummy_count as u32).map(|v| v + 5).collect());
-	let session_info = make_session_info(validators, vec![first_group, vec![3]]);
-
-	let session_index = 1;
-
-	let pool = sp_core::testing::TaskExecutor::new();
-	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
-	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-
-	let bg = async move {
-		let s = StatementDistributionSubsystem {
-			keystore: make_ferdie_keystore(),
-			v1_req_receiver: Some(statement_req_receiver),
-			req_receiver: Some(candidate_req_receiver),
-			metrics: Default::default(),
-			rng: AlwaysZeroRng,
-			reputation: ReputationAggregator::new(|_| true),
-		};
-		s.run(ctx).await.unwrap();
-	};
-
-	let test_fut = async move {
-		// register our active heads.
-		handle
-			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
-				ActiveLeavesUpdate::start_work(new_leaf(hash_a, 1)),
-			)))
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Ok(session_index));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(session_info)));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionExecutorParams(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(ExecutorParams::default())));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), )
-			) => {
-				si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-			}
-		);
-
-		// notify of dummy peers and view
-		for (peer, pair) in dummy_peers.clone().into_iter().zip(dummy_pairs) {
-			handle
-				.send(FromOrchestra::Communication {
-					msg: StatementDistributionMessage::NetworkBridgeUpdate(
-						NetworkBridgeEvent::PeerConnected(
-							peer,
-							ObservedRole::Full,
-							ValidationVersion::V1.into(),
-							Some(HashSet::from([pair.public().into()])),
-						),
-					),
-				})
-				.await;
-
-			handle
-				.send(FromOrchestra::Communication {
-					msg: StatementDistributionMessage::NetworkBridgeUpdate(
-						NetworkBridgeEvent::PeerViewChange(peer, view![hash_a]),
-					),
-				})
-				.await;
-		}
-
-		// notify of peers and view
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_a,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
-					),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_b,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Bob.public().into()])),
-					),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_c,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Charlie.public().into()])),
-					),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_bad,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						None,
-					),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_other_group,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Dave.public().into()])),
-					),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_b, view![hash_a]),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_c, view![hash_a]),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_bad, view![hash_a]),
-				),
-			})
-			.await;
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_other_group, view![hash_a]),
-				),
-			})
-			.await;
-
-		// receive a seconded statement from peer A, which does not provide the request data,
-		// then get that data from peer C. It should be propagated onwards to peer B and to
-		// candidate backing.
-		let statement = {
-			let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
-			let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory());
-			let ferdie_public = Keystore::sr25519_generate_new(
-				&*keystore,
-				ValidatorId::ID,
-				Some(&Sr25519Keyring::Ferdie.to_seed()),
-			)
-			.unwrap();
-
-			// note: this is ignored by legacy-v1 code.
-			let pvd = PersistedValidationData {
-				parent_head: HeadData::from(vec![1, 2, 3]),
-				relay_parent_number: 0,
-				relay_parent_storage_root: Hash::repeat_byte(42),
-				max_pov_size: 100,
-			};
-
-			SignedFullStatementWithPVD::sign(
-				&keystore,
-				Statement::Seconded(candidate.clone().into()).supply_pvd(pvd),
-				&signing_context,
-				ValidatorIndex(4),
-				&ferdie_public.into(),
-			)
-			.ok()
-			.flatten()
-			.expect("should be signed")
-		};
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::Share(hash_a, statement.clone()),
-			})
-			.await;
-
-		let statement = StatementWithPVD::drop_pvd_from_signed(statement);
-		let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
-
-		// Messages should go out:
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendValidationMessage(
-					mut recipients,
-					Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
-						protocol_v1::StatementDistributionMessage::LargeStatement(meta)
-					)),
-				)
-			) => {
-				gum::debug!(
-					target: LOG_TARGET,
-					?recipients,
-					"Recipients received"
-				);
-				recipients.sort();
-				// We expect only our backing group to be the recipients, due to the inflated
-				// test group above:
-				let mut expected: Vec<_> = vec![peer_a, peer_b, peer_c].into_iter().chain(dummy_peers).collect();
-				expected.sort();
-				assert_eq!(recipients.len(), expected.len());
-				assert_eq!(recipients, expected);
-				assert_eq!(meta.relay_parent, hash_a);
-				assert_eq!(meta.candidate_hash, statement.payload().candidate_hash());
-				assert_eq!(meta.signed_by, statement.validator_index());
-				assert_eq!(&meta.signature, statement.signature());
-			}
-		);
-
-		// Now that it has the candidate it should answer requests accordingly:
-
-		let (pending_response, response_rx) = oneshot::channel();
-		let inner_req = StatementFetchingRequest {
-			relay_parent: metadata.relay_parent,
-			candidate_hash: metadata.candidate_hash,
-		};
-		let req = sc_network::config::IncomingRequest {
-			peer: peer_b,
-			payload: inner_req.encode(),
-			pending_response,
-		};
-		req_cfg.inbound_queue.as_mut().unwrap().send(req).await.unwrap();
-		let StatementFetchingResponse::Statement(committed) =
-			Decode::decode(&mut response_rx.await.unwrap().result.unwrap().as_ref()).unwrap();
-		assert_eq!(committed, candidate.into());
-
-		handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
-	};
-
-	futures::pin_mut!(test_fut);
-	futures::pin_mut!(bg);
-
-	executor::block_on(future::join(test_fut, bg));
-}
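
The expected recipient set above contains only members of our own backing group: because the group was artificially inflated past the number of available slots, `peer_other_group` and `peer_bad` are crowded out entirely. A hedged sketch of that prioritisation (the cap and types are illustrative, not the subsystem's real selection code):

```rust
/// Illustrative priority rule: backing-group peers are served first; everyone
/// else only fills whatever slots remain under the cap.
fn choose_recipients(
	group_peers: &[&'static str],
	other_peers: &[&'static str],
	max_recipients: usize,
) -> Vec<&'static str> {
	let mut chosen: Vec<_> = group_peers.iter().copied().take(max_recipients).collect();
	let remaining = max_recipients.saturating_sub(chosen.len());
	chosen.extend(other_peers.iter().copied().take(remaining));
	chosen
}

fn main() {
	let group = ["peer_a", "peer_b", "peer_c"];
	let others = ["peer_other_group"];
	// With the group larger than the cap, non-group peers get no slot.
	assert_eq!(choose_recipients(&group, &others, 2), vec!["peer_a", "peer_b"]);
	// Only once the group is exhausted do other peers receive anything.
	assert_eq!(
		choose_recipients(&group, &others, 4),
		vec!["peer_a", "peer_b", "peer_c", "peer_other_group"],
	);
}
```
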
-
-#[test]
-fn peer_cant_flood_with_large_statements() {
-	sp_tracing::try_init_simple();
-	let hash_a = Hash::repeat_byte(1);
-
-	let candidate = {
-		let mut c = dummy_committed_candidate_receipt(dummy_hash());
-		c.descriptor.relay_parent = hash_a;
-		c.descriptor.para_id = 1.into();
-		c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3]));
-		c
-	};
-
-	let peer_a = PeerId::random(); // Alice
-
-	let validators = vec![
-		Sr25519Keyring::Alice.pair(),
-		Sr25519Keyring::Bob.pair(),
-		Sr25519Keyring::Charlie.pair(),
-		// other group
-		Sr25519Keyring::Dave.pair(),
-		// We:
-		Sr25519Keyring::Ferdie.pair(),
-	];
-
-	let first_group = vec![0, 1, 2, 4];
-	let session_info = make_session_info(validators, vec![first_group, vec![3]]);
-
-	let session_index = 1;
-
-	let pool = sp_core::testing::TaskExecutor::new();
-	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
-	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-	let bg = async move {
-		let s = StatementDistributionSubsystem {
-			keystore: make_ferdie_keystore(),
-			v1_req_receiver: Some(statement_req_receiver),
-			req_receiver: Some(candidate_req_receiver),
-			metrics: Default::default(),
-			rng: AlwaysZeroRng,
-			reputation: ReputationAggregator::new(|_| true),
-		};
-		s.run(ctx).await.unwrap();
-	};
-
-	let test_fut = async move {
-		// register our active heads.
-		handle
-			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
-				ActiveLeavesUpdate::start_work(new_leaf(hash_a, 1)),
-			)))
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
-			)
-				if r == hash_a
-			=> {
-				let _ = tx.send(Ok(session_index));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(session_info)));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionExecutorParams(sess_index, tx))
-			)
-				if r == hash_a && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(ExecutorParams::default())));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), )
-			) => {
-				si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-			}
-		);
-
-		// notify of peers and view
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerConnected(
-						peer_a,
-						ObservedRole::Full,
-						ValidationVersion::V1.into(),
-						Some(HashSet::from([Sr25519Keyring::Alice.public().into()])),
-					),
-				),
-			})
-			.await;
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerViewChange(peer_a, view![hash_a]),
-				),
-			})
-			.await;
-
-		// receive a seconded statement from peer A.
-		let statement = {
-			let signing_context = SigningContext { parent_hash: hash_a, session_index };
-
-			let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory());
-			let alice_public = Keystore::sr25519_generate_new(
-				&*keystore,
-				ValidatorId::ID,
-				Some(&Sr25519Keyring::Alice.to_seed()),
-			)
-			.unwrap();
-
-			SignedFullStatement::sign(
-				&keystore,
-				Statement::Seconded(candidate.clone().into()),
-				&signing_context,
-				ValidatorIndex(0),
-				&alice_public.into(),
-			)
-			.ok()
-			.flatten()
-			.expect("should be signed")
-		};
-
-		let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into());
-
-		for _ in 0..MAX_LARGE_STATEMENTS_PER_SENDER + 1 {
-			handle
-				.send(FromOrchestra::Communication {
-					msg: StatementDistributionMessage::NetworkBridgeUpdate(
-						NetworkBridgeEvent::PeerMessage(
-							peer_a,
-							Versioned::V1(
-								protocol_v1::StatementDistributionMessage::LargeStatement(
-									metadata.clone(),
-								),
-							),
-						),
-					),
-				})
-				.await;
-		}
-
-		// We should try to fetch the data and punish the peer (but we don't know what comes
-		// first):
-		let mut requested = false;
-		let mut punished = false;
-		for _ in 0..2 {
-			match handle.recv().await {
-				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(
-					mut reqs,
-					IfDisconnected::ImmediateError,
-				)) => {
-					let reqs = reqs.pop().unwrap();
-					let outgoing = match reqs {
-						Requests::StatementFetchingV1(outgoing) => outgoing,
-						_ => panic!("Unexpected request"),
-					};
-					let req = outgoing.payload;
-					assert_eq!(req.relay_parent, metadata.relay_parent);
-					assert_eq!(req.candidate_hash, metadata.candidate_hash);
-					assert_eq!(outgoing.peer, Recipient::Peer(peer_a));
-					// Just drop request - should trigger error.
-					requested = true;
-				},
-
-				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(
-					ReportPeerMessage::Single(p, r),
-				)) if p == peer_a && r == COST_APPARENT_FLOOD.into() => {
-					punished = true;
-				},
-
-				m => panic!("Unexpected message: {:?}", m),
-			}
-		}
-		assert!(requested, "large data has not been requested.");
-		assert!(punished, "Peer should have been punished for flooding.");
-
-		handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
-	};
-
-	futures::pin_mut!(test_fut);
-	futures::pin_mut!(bg);
-
-	executor::block_on(future::join(test_fut, bg));
-}
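
The flood test sends `MAX_LARGE_STATEMENTS_PER_SENDER + 1` identical announcements and expects exactly one fetch attempt plus one `COST_APPARENT_FLOOD` report. A minimal sketch of such a per-sender cap, with an assumed value for the constant (only the name mirrors the test):

```rust
use std::collections::HashMap;

/// Assumed allowance, for illustration only.
const MAX_LARGE_STATEMENTS_PER_SENDER: usize = 20;

struct FloodGuard {
	pending_per_sender: HashMap<&'static str, usize>,
}

impl FloodGuard {
	fn new() -> Self {
		Self { pending_per_sender: HashMap::new() }
	}

	/// Returns `false` (i.e. "punish as apparent flood") once a sender exceeds
	/// its allowance of in-flight large-statement announcements.
	fn allow(&mut self, sender: &'static str) -> bool {
		let count = self.pending_per_sender.entry(sender).or_insert(0);
		*count += 1;
		*count <= MAX_LARGE_STATEMENTS_PER_SENDER
	}
}

fn main() {
	let mut guard = FloodGuard::new();
	let allowed = (0..MAX_LARGE_STATEMENTS_PER_SENDER + 1)
		.filter(|_| guard.allow("peer_a"))
		.count();
	// The final, excess announcement is rejected.
	assert_eq!(allowed, MAX_LARGE_STATEMENTS_PER_SENDER);
}
```
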
-
-// This test addresses an issue where received knowledge was not updated on
-// subsequent `Seconded` statements.
-// See https://github.com/paritytech/polkadot/pull/5177
-#[test]
-fn handle_multiple_seconded_statements() {
-	let relay_parent_hash = Hash::repeat_byte(1);
-	let pvd = dummy_pvd();
-
-	let candidate = dummy_committed_candidate_receipt_v2(relay_parent_hash);
-	let candidate_hash = candidate.hash();
-
-	// We want to ensure that peer_a and peer_b are not among the randomly chosen lucky peers
-	let mut all_peers: Vec<PeerId> = Vec::with_capacity(MIN_GOSSIP_PEERS + 4);
-	let peer_a = PeerId::random();
-	let peer_b = PeerId::random();
-	assert_ne!(peer_a, peer_b);
-
-	for _ in 0..MIN_GOSSIP_PEERS + 2 {
-		all_peers.push(PeerId::random());
-	}
-	all_peers.push(peer_a);
-	all_peers.push(peer_b);
-
-	let mut lucky_peers = all_peers.clone();
-	util::choose_random_subset_with_rng(
-		|_| false,
-		&mut lucky_peers,
-		&mut AlwaysZeroRng,
-		MIN_GOSSIP_PEERS,
-	);
-	lucky_peers.sort();
-	assert_eq!(lucky_peers.len(), MIN_GOSSIP_PEERS);
-	assert!(!lucky_peers.contains(&peer_a));
-	assert!(!lucky_peers.contains(&peer_b));
-
-	let validators = vec![
-		Sr25519Keyring::Alice.pair(),
-		Sr25519Keyring::Bob.pair(),
-		Sr25519Keyring::Charlie.pair(),
-	];
-
-	let session_info = make_session_info(validators, vec![]);
-
-	let session_index = 1;
-
-	let pool = sp_core::testing::TaskExecutor::new();
-	let (ctx, mut handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
-
-	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
-	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-	let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver::<
-		Block,
-		sc_network::NetworkWorker<Block, Hash>,
-	>(&req_protocol_names);
-
-	let virtual_overseer_fut = async move {
-		let s = StatementDistributionSubsystem {
-			keystore: Arc::new(LocalKeystore::in_memory()),
-			v1_req_receiver: Some(statement_req_receiver),
-			req_receiver: Some(candidate_req_receiver),
-			metrics: Default::default(),
-			rng: AlwaysZeroRng,
-			reputation: ReputationAggregator::new(|_| true),
-		};
-		s.run(ctx).await.unwrap();
-	};
-
-	let test_fut = async move {
-		// register our active heads.
-		handle
-			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
-				ActiveLeavesUpdate::start_work(new_leaf(relay_parent_hash, 1)),
-			)))
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::AsyncBackingParams(tx))
-			)
-				if r == relay_parent_hash
-			=> {
-				let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionIndexForChild(tx))
-			)
-				if r == relay_parent_hash
-			=> {
-				let _ = tx.send(Ok(session_index));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionInfo(sess_index, tx))
-			)
-				if r == relay_parent_hash && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(session_info)));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(r, RuntimeApiRequest::SessionExecutorParams(sess_index, tx))
-			)
-				if r == relay_parent_hash && sess_index == session_index
-			=> {
-				let _ = tx.send(Ok(Some(ExecutorParams::default())));
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(
-				RuntimeApiMessage::Request(_, RuntimeApiRequest::NodeFeatures(_, si_tx), )
-			) => {
-				si_tx.send(Ok(NodeFeatures::EMPTY)).unwrap();
-			}
-		);
-		// notify of peers and view
-		for peer in all_peers.iter() {
-			handle
-				.send(FromOrchestra::Communication {
-					msg: StatementDistributionMessage::NetworkBridgeUpdate(
-						NetworkBridgeEvent::PeerConnected(
-							*peer,
-							ObservedRole::Full,
-							ValidationVersion::V1.into(),
-							None,
-						),
-					),
-				})
-				.await;
-			handle
-				.send(FromOrchestra::Communication {
-					msg: StatementDistributionMessage::NetworkBridgeUpdate(
-						NetworkBridgeEvent::PeerViewChange(*peer, view![relay_parent_hash]),
-					),
-				})
-				.await;
-		}
-
-		// Set up a topology which puts peers a & b in a column together.
-		let gossip_topology = {
-			// Create a (lucky_peers + 1) x (lucky_peers + 1) grid topology where we are at
-			// index 2, sharing a row with peer_a (0) and peer_b (1) and a column with all
-			// the lucky peers. The rest is filled with junk.
-			// This is an absolute garbage hack that depends on quirks of the implementation
-			// and not on sound architecture.
-
-			let n_lucky = lucky_peers.len();
-			let dim = n_lucky + 1;
-			let grid_size = dim * dim;
-			let topology_peer_info: Vec<_> = (0..grid_size)
-				.map(|i| {
-					if i == 0 {
-						TopologyPeerInfo {
-							peer_ids: vec![peer_a],
-							validator_index: ValidatorIndex(0),
-							discovery_id: AuthorityPair::generate().0.public(),
-						}
-					} else if i == 1 {
-						TopologyPeerInfo {
-							peer_ids: vec![peer_b],
-							validator_index: ValidatorIndex(1),
-							discovery_id: AuthorityPair::generate().0.public(),
-						}
-					} else if i == 2 {
-						TopologyPeerInfo {
-							peer_ids: vec![],
-							validator_index: ValidatorIndex(2),
-							discovery_id: AuthorityPair::generate().0.public(),
-						}
-					} else if (i - 2) % dim == 0 {
-						let lucky_index = ((i - 2) / dim) - 1;
-						TopologyPeerInfo {
-							peer_ids: vec![lucky_peers[lucky_index]],
-							validator_index: ValidatorIndex(i as _),
-							discovery_id: AuthorityPair::generate().0.public(),
-						}
-					} else {
-						TopologyPeerInfo {
-							peer_ids: vec![PeerId::random()],
-							validator_index: ValidatorIndex(i as _),
-							discovery_id: AuthorityPair::generate().0.public(),
-						}
-					}
-				})
-				.collect();
-
-			// also a hack: this is only required to be accurate for
-			// the validator indices we compute grid neighbors for.
-			let mut shuffled_indices = vec![0; grid_size];
-			shuffled_indices[2] = 2;
-
-			// Some sanity checking to make sure this hack is set up correctly.
-			let topology = SessionGridTopology::new(shuffled_indices, topology_peer_info);
-			let grid_neighbors = topology.compute_grid_neighbors_for(ValidatorIndex(2)).unwrap();
-			assert_eq!(grid_neighbors.peers_x.len(), 25);
-			assert!(grid_neighbors.peers_x.contains(&peer_a));
-			assert!(grid_neighbors.peers_x.contains(&peer_b));
-			assert!(!grid_neighbors.peers_y.contains(&peer_b));
-			assert!(!grid_neighbors.route_to_peer(RequiredRouting::GridY, &peer_b));
-			assert_eq!(grid_neighbors.peers_y.len(), lucky_peers.len());
-			for lucky in &lucky_peers {
-				assert!(grid_neighbors.peers_y.contains(lucky));
-			}
-
-			network_bridge_event::NewGossipTopology {
-				session: 1,
-				topology,
-				local_index: Some(ValidatorIndex(2)),
-			}
-		};
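
The sanity checks above pin down the row/column structure the hack relies on. A hedged sketch of the neighbour rule for a `dim x dim` grid (illustrative; the real `SessionGridTopology` additionally handles index shuffling and multiple peer ids per validator):

```rust
/// Illustrative grid rule: validator `me` gossips along its row ("X" peers)
/// and its column ("Y" peers) of the `dim x dim` grid.
fn grid_neighbors(dim: usize, me: usize) -> (Vec<usize>, Vec<usize>) {
	let (row, col) = (me / dim, me % dim);
	let peers_x = (0..dim).map(|c| row * dim + c).filter(|&i| i != me).collect();
	let peers_y = (0..dim).map(|r| r * dim + col).filter(|&i| i != me).collect();
	(peers_x, peers_y)
}

fn main() {
	// In a 3x3 grid, validator 2 (row 0, column 2) shares a row with 0 and 1
	// and a column with 5 and 8 -- the same shape the test builds at scale,
	// with peer_a and peer_b on our row and the lucky peers down our column.
	let (x, y) = grid_neighbors(3, 2);
	assert_eq!(x, vec![0, 1]);
	assert_eq!(y, vec![5, 8]);
}
```
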
-
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::NewGossipTopology(gossip_topology),
-				),
-			})
-			.await;
-
-		// receive a seconded statement from peer A. It should be propagated onwards to peer B
-		// and to candidate backing.
-		let statement = {
-			let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index };
-
-			let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory());
-			let alice_public = Keystore::sr25519_generate_new(
-				&*keystore,
-				ValidatorId::ID,
-				Some(&Sr25519Keyring::Alice.to_seed()),
-			)
-			.unwrap();
-
-			SignedFullStatement::sign(
-				&keystore,
-				Statement::Seconded(candidate.clone()),
-				&signing_context,
-				ValidatorIndex(0),
-				&alice_public.into(),
-			)
-			.ok()
-			.flatten()
-			.expect("should be signed")
-		};
-
-		// `PeerA` sends a `Seconded` message
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_a,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
-							relay_parent_hash,
-							statement.clone().into(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone());
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
-				_,
-				RuntimeApiRequest::PersistedValidationData(_, assumption, tx),
-			)) if assumption == OccupiedCoreAssumption::Free => {
-				tx.send(Ok(Some(pvd.clone()))).unwrap();
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) => {
-				assert_eq!(p, peer_a);
-				assert_eq!(r, BENEFIT_VALID_STATEMENT_FIRST.into());
-			}
-		);
-
-		// After the first valid statement, we expect messages to be circulated
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::Statement(r, s)
-			) => {
-				assert_eq!(r, relay_parent_hash);
-				assert_eq!(s, statement_with_pvd);
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendValidationMessage(
-					recipients,
-					Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
-						protocol_v1::StatementDistributionMessage::Statement(r, s)
-					)),
-				)
-			) => {
-				assert!(!recipients.contains(&peer_b));
-				assert_eq!(r, relay_parent_hash);
-				assert_eq!(s, statement.clone().into());
-			}
-		);
-
-		// `PeerB` sends a `Seconded` message: valid but known
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_b,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
-							relay_parent_hash,
-							statement.clone().into(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) => {
-				assert_eq!(p, peer_b);
-				assert_eq!(r, BENEFIT_VALID_STATEMENT.into());
-			}
-		);
-
-		// Create a `Valid` statement
-		let statement = {
-			let signing_context = SigningContext { parent_hash: relay_parent_hash, session_index };
-
-			let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory());
-			let alice_public = Keystore::sr25519_generate_new(
-				&*keystore,
-				ValidatorId::ID,
-				Some(&Sr25519Keyring::Alice.to_seed()),
-			)
-			.unwrap();
-
-			SignedFullStatement::sign(
-				&keystore,
-				Statement::Valid(candidate_hash),
-				&signing_context,
-				ValidatorIndex(0),
-				&alice_public.into(),
-			)
-			.ok()
-			.flatten()
-			.expect("should be signed")
-		};
-
-		// `PeerA` sends a `Valid` message
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_a,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
-							relay_parent_hash,
-							statement.clone().into(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone());
-
-		// Persisted validation data is cached.
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) => {
-				assert_eq!(p, peer_a);
-				assert_eq!(r, BENEFIT_VALID_STATEMENT_FIRST.into());
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::CandidateBacking(
-				CandidateBackingMessage::Statement(r, s)
-			) => {
-				assert_eq!(r, relay_parent_hash);
-				assert_eq!(s, statement_with_pvd);
-			}
-		);
-
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::SendValidationMessage(
-					recipients,
-					Versioned::V1(protocol_v1::ValidationProtocol::StatementDistribution(
-						protocol_v1::StatementDistributionMessage::Statement(r, s)
-					)),
-				)
-			) => {
-				assert!(!recipients.contains(&peer_b));
-				assert_eq!(r, relay_parent_hash);
-				assert_eq!(s, statement.clone().into());
-			}
-		);
-
-		// `PeerB` sends a `Valid` message
-		handle
-			.send(FromOrchestra::Communication {
-				msg: StatementDistributionMessage::NetworkBridgeUpdate(
-					NetworkBridgeEvent::PeerMessage(
-						peer_b,
-						Versioned::V1(protocol_v1::StatementDistributionMessage::Statement(
-							relay_parent_hash,
-							statement.clone().into(),
-						)),
-					),
-				),
-			})
-			.await;
-
-		// We expect that this is still valid despite the fact that `PeerB` was not
-		// the first when sending `Seconded`
-		assert_matches!(
-			handle.recv().await,
-			AllMessages::NetworkBridgeTx(
-				NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))
-			) => {
-				assert_eq!(p, peer_b);
-				assert_eq!(r, BENEFIT_VALID_STATEMENT.into());
-			}
-		);
-
-		handle.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
-	};
-
-	futures::pin_mut!(test_fut);
-	futures::pin_mut!(virtual_overseer_fut);
-
-	executor::block_on(future::join(test_fut, virtual_overseer_fut));
-}
-
 fn make_session_info(validators: Vec<Pair>, groups: Vec<Vec<u32>>) -> SessionInfo {
 	let validator_groups: IndexedVec<GroupIndex, Vec<ValidatorIndex>> = groups
 		.iter()
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index 33431eb1edce585943100e4c23d7234a39779f9c..3b5d00921ca0cf00387e6aa7626524df00ff4c31 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -28,7 +28,6 @@ use polkadot_node_network_protocol::{
 	request_response::{v1 as request_v1, v2::AttestedCandidateRequest, IncomingRequestReceiver},
 	v2 as protocol_v2, v3 as protocol_v3, Versioned,
 };
-use polkadot_node_primitives::StatementWithPVD;
 use polkadot_node_subsystem::{
 	messages::{NetworkBridgeEvent, StatementDistributionMessage},
 	overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError,
@@ -36,7 +35,6 @@ use polkadot_node_subsystem::{
 use polkadot_node_subsystem_util::{
 	rand,
 	reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL},
-	runtime::{prospective_parachains_mode, ProspectiveParachainsMode},
 };
 
 use futures::{channel::mpsc, prelude::*};
@@ -322,33 +320,14 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 			})) => {
 				let _timer = metrics.time_active_leaves_update();
 
-				// v2 should handle activated first because of implicit view.
 				if let Some(ref activated) = activated {
-					let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?;
-					if let ProspectiveParachainsMode::Enabled { .. } = mode {
-						let res =
-							v2::handle_active_leaves_update(ctx, state, activated, mode, &metrics)
-								.await;
-						// Regardless of the result of leaf activation, we always prune before
-						// handling it to avoid leaks.
-						v2::handle_deactivate_leaves(state, &deactivated);
-						res?;
-					} else if let ProspectiveParachainsMode::Disabled = mode {
-						for deactivated in &deactivated {
-							crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated);
-						}
-
-						crate::legacy_v1::handle_activated_leaf(
-							ctx,
-							legacy_v1_state,
-							activated.clone(),
-						)
-						.await?;
-					}
+					let res =
+						v2::handle_active_leaves_update(ctx, state, activated, &metrics).await;
+					// Regardless of the result of leaf activation, we always prune before
+					// handling it to avoid leaks.
+					v2::handle_deactivate_leaves(state, &deactivated);
+					res?;
 				} else {
-					for deactivated in &deactivated {
-						crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated);
-					}
 					v2::handle_deactivate_leaves(state, &deactivated);
 				}
 			},
@@ -360,28 +339,15 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
 				StatementDistributionMessage::Share(relay_parent, statement) => {
 					let _timer = metrics.time_share();
 
-					// pass to legacy if legacy state contains head.
-					if legacy_v1_state.contains_relay_parent(&relay_parent) {
-						crate::legacy_v1::share_local_statement(
-							ctx,
-							legacy_v1_state,
-							relay_parent,
-							StatementWithPVD::drop_pvd_from_signed(statement),
-							&mut self.rng,
-							metrics,
-						)
-						.await?;
-					} else {
-						v2::share_local_statement(
-							ctx,
-							state,
-							relay_parent,
-							statement,
-							&mut self.reputation,
-							&self.metrics,
-						)
-						.await?;
-					}
+					v2::share_local_statement(
+						ctx,
+						state,
+						relay_parent,
+						statement,
+						&mut self.reputation,
+						&self.metrics,
+					)
+					.await?;
 				},
 				StatementDistributionMessage::NetworkBridgeUpdate(event) => {
 					// pass all events to both protocols except for messages,
diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs
index 6bb49e5de13dd35ed0d285dd2789b6f12d9e702c..3034ca7caf851df09c267880991f15718abeb277 100644
--- a/polkadot/node/network/statement-distribution/src/v2/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs
@@ -45,10 +45,7 @@ use polkadot_node_subsystem::{
 use polkadot_node_subsystem_util::{
 	backing_implicit_view::View as ImplicitView,
 	reputation::ReputationAggregator,
-	runtime::{
-		request_min_backing_votes, request_node_features, ClaimQueueSnapshot,
-		ProspectiveParachainsMode,
-	},
+	runtime::{request_min_backing_votes, request_node_features, ClaimQueueSnapshot},
 };
 use polkadot_primitives::{
 	node_features::FeatureIndex,
@@ -163,11 +160,11 @@ pub(crate) const REQUEST_RETRY_DELAY: Duration = Duration::from_secs(1);
 struct PerRelayParentState {
 	local_validator: Option<LocalValidatorState>,
 	statement_store: StatementStore,
-	seconding_limit: usize,
 	session: SessionIndex,
 	transposed_cq: TransposedClaimQueue,
 	groups_per_para: HashMap<ParaId, Vec<GroupIndex>>,
 	disabled_validators: HashSet<ValidatorIndex>,
+	assignments_per_group: HashMap<GroupIndex, Vec<ParaId>>,
 }
 
 impl PerRelayParentState {
@@ -304,8 +301,6 @@ impl PerSessionState {
 
 pub(crate) struct State {
 	/// The utility for managing the implicit and explicit views in a consistent way.
-	///
-	/// We only feed leaves which have prospective parachains enabled to this view.
 	implicit_view: ImplicitView,
 	candidates: Candidates,
 	per_relay_parent: HashMap<Hash, PerRelayParentState>,
@@ -572,23 +567,14 @@ pub(crate) async fn handle_network_update<Context>(
 	}
 }
 
-/// If there is a new leaf, this should only be called for leaves which support
-/// prospective parachains.
+/// Called on new leaf updates.
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
 pub(crate) async fn handle_active_leaves_update<Context>(
 	ctx: &mut Context,
 	state: &mut State,
 	activated: &ActivatedLeaf,
-	leaf_mode: ProspectiveParachainsMode,
 	metrics: &Metrics,
 ) -> JfyiErrorResult<()> {
-	let max_candidate_depth = match leaf_mode {
-		ProspectiveParachainsMode::Disabled => return Ok(()),
-		ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth,
-	};
-
-	let seconding_limit = max_candidate_depth + 1;
-
 	state
 		.implicit_view
 		.activate_leaf(ctx.sender(), activated.hash)
@@ -697,27 +683,21 @@ pub(crate) async fn handle_active_leaves_update<Context>(
 				.map_err(JfyiError::FetchClaimQueue)?,
 		);
 
+		let (groups_per_para, assignments_per_group) = determine_group_assignments(
+			per_session.groups.all().len(),
+			group_rotation_info,
+			&claim_queue,
+		)
+		.await;
+
 		let local_validator = per_session.local_validator.and_then(|v| {
 			if let LocalValidatorIndex::Active(idx) = v {
-				find_active_validator_state(
-					idx,
-					&per_session.groups,
-					&group_rotation_info,
-					&claim_queue,
-					seconding_limit,
-				)
+				find_active_validator_state(idx, &per_session.groups, &assignments_per_group)
 			} else {
 				Some(LocalValidatorState { grid_tracker: GridTracker::default(), active: None })
 			}
 		});
 
-		let groups_per_para = determine_groups_per_para(
-			per_session.groups.all().len(),
-			group_rotation_info,
-			&claim_queue,
-		)
-		.await;
-
 		let transposed_cq = transpose_claim_queue(claim_queue.0);
 
 		state.per_relay_parent.insert(
@@ -725,11 +705,11 @@ pub(crate) async fn handle_active_leaves_update<Context>(
 			PerRelayParentState {
 				local_validator,
 				statement_store: StatementStore::new(&per_session.groups),
-				seconding_limit,
 				session: session_index,
 				groups_per_para,
 				disabled_validators,
 				transposed_cq,
+				assignments_per_group,
 			},
 		);
 	}
@@ -769,9 +749,7 @@ pub(crate) async fn handle_active_leaves_update<Context>(
 fn find_active_validator_state(
 	validator_index: ValidatorIndex,
 	groups: &Groups,
-	group_rotation_info: &GroupRotationInfo,
-	claim_queue: &ClaimQueueSnapshot,
-	seconding_limit: usize,
+	assignments_per_group: &HashMap<GroupIndex, Vec<ParaId>>,
 ) -> Option<LocalValidatorState> {
 	if groups.all().is_empty() {
 		return None
@@ -779,17 +757,17 @@ fn find_active_validator_state(
 
 	let our_group = groups.by_validator_index(validator_index)?;
 
-	let core_index = group_rotation_info.core_for_group(our_group, groups.all().len());
-	let paras_assigned_to_core = claim_queue.iter_claims_for_core(&core_index).copied().collect();
 	let group_validators = groups.get(our_group)?.to_owned();
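+	// The per-core seconding limit now equals the number of claim queue assignments
+	// for our core (i.e. the scheduling lookahead), replacing the old
+	// `max_candidate_depth + 1` bound.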
+	let paras_assigned_to_core = assignments_per_group.get(&our_group).cloned().unwrap_or_default();
+	let seconding_limit = paras_assigned_to_core.len();
 
 	Some(LocalValidatorState {
 		active: Some(ActiveValidatorState {
 			index: validator_index,
 			group: our_group,
-			assignments: paras_assigned_to_core,
 			cluster_tracker: ClusterTracker::new(group_validators, seconding_limit)
 				.expect("group is non-empty because we are in it; qed"),
+			assignments: paras_assigned_to_core.clone(),
 		}),
 		grid_tracker: GridTracker::default(),
 	})
@@ -1231,13 +1209,14 @@ pub(crate) async fn share_local_statement<Context>(
 		return Err(JfyiError::InvalidShare)
 	}
 
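+	// One `Seconded` statement is allowed per claim queue assignment of our core.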
+	let seconding_limit = local_assignments.len();
+
 	if is_seconded &&
-		per_relay_parent.statement_store.seconded_count(&local_index) ==
-			per_relay_parent.seconding_limit
+		per_relay_parent.statement_store.seconded_count(&local_index) >= seconding_limit
 	{
 		gum::warn!(
 			target: LOG_TARGET,
-			limit = ?per_relay_parent.seconding_limit,
+			limit = ?seconding_limit,
 			"Local node has issued too many `Seconded` statements",
 		);
 		return Err(JfyiError::InvalidShare)
@@ -2183,12 +2162,14 @@ async fn provide_candidate_to_grid<Context>(
 	}
 }
 
-// Utility function to populate per relay parent `ParaId` to `GroupIndex` mappings.
-async fn determine_groups_per_para(
+// Utility function to populate:
+// - per relay parent `ParaId` to `GroupIndex` mappings,
+// - per `GroupIndex` claim queue assignments.
+async fn determine_group_assignments(
 	n_cores: usize,
 	group_rotation_info: GroupRotationInfo,
 	claim_queue: &ClaimQueueSnapshot,
-) -> HashMap<ParaId, Vec<GroupIndex>> {
+) -> (HashMap<ParaId, Vec<GroupIndex>>, HashMap<GroupIndex, Vec<ParaId>>) {
 	// Determine the core indices occupied by each para at the current relay parent. To support
 	// on-demand parachains we also consider the core indices at next blocks.
 	let schedule: HashMap<CoreIndex, Vec<ParaId>> = claim_queue
@@ -2197,16 +2178,19 @@ async fn determine_groups_per_para(
 		.collect();
 
 	let mut groups_per_para = HashMap::new();
+	let mut assignments_per_group = HashMap::with_capacity(schedule.len());
+
 	// Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`.
 	for (core_index, paras) in schedule {
 		let group_index = group_rotation_info.group_for_core(core_index, n_cores);
+		assignments_per_group.insert(group_index, paras.clone());
 
 		for para in paras {
 			groups_per_para.entry(para).or_insert_with(Vec::new).push(group_index);
 		}
 	}
 
-	groups_per_para
+	(groups_per_para, assignments_per_group)
 }
 
 #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
@@ -2351,10 +2335,7 @@ async fn handle_incoming_manifest_common<'a, Context>(
 	reputation: &mut ReputationAggregator,
 ) -> Option<ManifestImportSuccess<'a>> {
 	// 1. sanity checks: peer is connected, relay-parent in state, para ID matches group index.
-	let peer_state = match peers.get(&peer) {
-		None => return None,
-		Some(p) => p,
-	};
+	let peer_state = peers.get(&peer)?;
 
 	let relay_parent_state = match per_relay_parent.get_mut(&relay_parent) {
 		None => {
@@ -2370,10 +2351,7 @@ async fn handle_incoming_manifest_common<'a, Context>(
 		Some(s) => s,
 	};
 
-	let per_session = match per_session.get(&relay_parent_state.session) {
-		None => return None,
-		Some(s) => s,
-	};
+	let per_session = per_session.get(&relay_parent_state.session)?;
 
 	if relay_parent_state.local_validator.is_none() {
 		if per_session.is_not_validator() {
@@ -2398,10 +2376,7 @@ async fn handle_incoming_manifest_common<'a, Context>(
 		return None
 	}
 
-	let grid_topology = match per_session.grid_view.as_ref() {
-		None => return None,
-		Some(x) => x,
-	};
+	let grid_topology = per_session.grid_view.as_ref()?;
 
 	let sender_index = grid_topology
 		.iter_sending_for_group(manifest_summary.claimed_group_index, manifest_kind)
@@ -2436,11 +2411,18 @@ async fn handle_incoming_manifest_common<'a, Context>(
 
 	let local_validator = relay_parent_state.local_validator.as_mut().expect("checked above; qed");
 
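+	// The limit for this (group, para) pair is the number of times the para appears
+	// in the group's claim queue assignments.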
+	let seconding_limit = relay_parent_state
+		.assignments_per_group
+		.get(&group_index)?
+		.iter()
+		.filter(|para| para == &&para_id)
+		.count();
+
 	let acknowledge = match local_validator.grid_tracker.import_manifest(
 		grid_topology,
 		&per_session.groups,
 		candidate_hash,
-		relay_parent_state.seconding_limit,
+		seconding_limit,
 		manifest_summary,
 		manifest_kind,
 		sender_index,
@@ -3015,7 +2997,7 @@ pub(crate) async fn dispatch_requests<Context>(ctx: &mut Context, state: &mut St
 		let relay_parent_state = state.per_relay_parent.get(&relay_parent)?;
 		let per_session = state.per_session.get(&relay_parent_state.session)?;
 		let group = per_session.groups.get(group_index)?;
-		let seconding_limit = relay_parent_state.seconding_limit;
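+		// One `Seconded` statement can be requested per claim queue assignment of the group.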
+		let seconding_limit = relay_parent_state.assignments_per_group.get(&group_index)?.len();
 
 		// Request nothing which would be an 'over-seconded' statement.
 		let mut unwanted_mask = StatementFilter::blank(group.len());
diff --git a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs
index 56a54f6316c070df0111d22bbc9649fb8de6d0b4..e1dfc2f5a2faaecee68926fae4f1fc298ae87821 100644
--- a/polkadot/node/network/statement-distribution/src/v2/statement_store.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/statement_store.rs
@@ -217,7 +217,7 @@ impl StatementStore {
 		&'a self,
 		validators: &'a [ValidatorIndex],
 		candidate_hash: CandidateHash,
-	) -> impl Iterator<Item = &SignedStatement> + 'a {
+	) -> impl Iterator<Item = &'a SignedStatement> + 'a {
 		let s_st = CompactStatement::Seconded(candidate_hash);
 		let v_st = CompactStatement::Valid(candidate_hash);
 
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
index 040123f1774cf727341be0bd0a1899b2430b51bd..750bcca9cb50b2d11c316bcc7b01c7dfe9064495 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs
@@ -24,7 +24,6 @@ fn share_seconded_circulated_to_cluster() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -125,7 +124,6 @@ fn cluster_valid_statement_before_seconded_ignored() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -186,7 +184,6 @@ fn cluster_statement_bad_signature() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -260,7 +257,6 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -323,7 +319,6 @@ fn elastic_scaling_useful_cluster_statement_from_non_cluster_peer_rejected() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -383,7 +378,6 @@ fn statement_from_non_cluster_originator_unexpected() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -439,7 +433,6 @@ fn seconded_statement_leads_to_request() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -528,7 +521,6 @@ fn cluster_statements_shared_seconded_first() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -643,7 +635,6 @@ fn cluster_accounts_for_implicit_view() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -743,8 +734,7 @@ fn cluster_accounts_for_implicit_view() {
 		// peer B never had the relay parent in its view, so this tests that
 		// the implicit view is working correctly for B.
 		//
-		// the fact that the statement isn't sent again to A also indicates that it works
-		// it's working.
+		// the fact that the statement isn't sent again to A also indicates that it's working.
 		assert_matches!(
 			overseer.recv().await,
 			AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessages(messages)) => {
@@ -780,7 +770,6 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -904,7 +893,6 @@ fn cluster_messages_imported_after_new_leaf_importable_check() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -1033,18 +1021,14 @@ fn cluster_messages_imported_after_new_leaf_importable_check() {
 
 #[test]
 fn ensure_seconding_limit_is_respected() {
-	// `max_candidate_depth: 1` for a `seconding_limit` of 2.
+	// Use a `scheduling_lookahead` of 2 to restrict the per-core seconding limit to 2.
+	let scheduling_lookahead = 2;
 	let config = TestConfig {
 		validator_count: 20,
 		group_size: 4,
 		local_validator: LocalRole::Validator,
-		async_backing_params: Some(AsyncBackingParams {
-			max_candidate_depth: 1,
-			allowed_ancestry_len: 3,
-		}),
 		allow_v2_descriptors: false,
 	};
-
 	let relay_parent = Hash::repeat_byte(1);
 	let peer_a = PeerId::random();
 
@@ -1053,7 +1037,8 @@ fn ensure_seconding_limit_is_respected() {
 		let local_group_index = local_validator.group_index.unwrap();
 		let local_para = ParaId::from(local_group_index.0);
 
-		let test_leaf = state.make_dummy_leaf(relay_parent);
+		let test_leaf =
+			state.make_dummy_leaf_with_scheduling_lookahead(relay_parent, scheduling_lookahead);
 
 		let (candidate_1, pvd_1) = make_candidate(
 			relay_parent,
@@ -1232,3 +1217,117 @@ fn ensure_seconding_limit_is_respected() {
 		overseer
 	});
 }
+
+#[test]
+fn delayed_reputation_changes() {
+	let config = TestConfig {
+		validator_count: 20,
+		group_size: 3,
+		local_validator: LocalRole::Validator,
+		allow_v2_descriptors: false,
+	};
+
+	let keystore = test_helpers::mock::make_ferdie_keystore();
+	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+	let (statement_req_receiver, _) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let (candidate_req_receiver, req_cfg) = IncomingRequest::get_config_receiver::<
+		Block,
+		sc_network::NetworkWorker<Block, Hash>,
+	>(&req_protocol_names);
+	let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0);
+
+	let state = TestState::from_config(config, req_cfg.inbound_queue.unwrap(), &mut rng);
+
+	// We can't use the test harness as we need to spawn our own subsystem with custom config.
+	let (context, mut virtual_overseer) =
+		polkadot_node_subsystem_test_helpers::make_subsystem_context(
+			sp_core::testing::TaskExecutor::new(),
+		);
+	let subsystem = async move {
+		let subsystem = crate::StatementDistributionSubsystem {
+			keystore,
+			v1_req_receiver: Some(statement_req_receiver),
+			req_receiver: Some(candidate_req_receiver),
+			metrics: Default::default(),
+			rng,
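+			// `|_| false`: never send reputation changes immediately, always batch them.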
+			reputation: ReputationAggregator::new(|_| false),
+		};
+
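+		// Use a short reputation aggregation interval so the test can observe the
+		// delayed batch quickly.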
+		if let Err(e) = subsystem.run_inner(context, Duration::from_millis(100)).await {
+			panic!("Fatal error: {:?}", e);
+		}
+	};
+
+	let test_fut = async move {
+		let relay_parent = Hash::repeat_byte(1);
+		let peer_a = PeerId::random();
+
+		let local_validator = state.local.clone().unwrap();
+		let local_group_index = local_validator.group_index.unwrap();
+		let candidate_hash = CandidateHash(Hash::repeat_byte(42));
+
+		let test_leaf = state.make_dummy_leaf(relay_parent);
+
+		// peer A is in group, has relay parent in view.
+		let other_group_validators = state.group_validators(local_group_index, true);
+		let v_a = other_group_validators[0];
+		connect_peer(
+			&mut virtual_overseer,
+			peer_a.clone(),
+			Some(vec![state.discovery_id(v_a)].into_iter().collect()),
+		)
+		.await;
+
+		send_peer_view_change(&mut virtual_overseer, peer_a.clone(), view![relay_parent]).await;
+		activate_leaf(&mut virtual_overseer, &test_leaf, &state, true, vec![]).await;
+
+		let signed_valid = state.sign_statement(
+			v_a,
+			CompactStatement::Valid(candidate_hash),
+			&SigningContext { parent_hash: relay_parent, session_index: 1 },
+		);
+
+		send_peer_message(
+			&mut virtual_overseer,
+			peer_a.clone(),
+			protocol_v2::StatementDistributionMessage::Statement(
+				relay_parent,
+				signed_valid.as_unchecked().clone(),
+			),
+		)
+		.await;
+
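+		// Nothing is sent immediately; the reputation change is only aggregated.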
+		assert_matches!(virtual_overseer.rx.next().timeout(Duration::from_millis(50)).await, None);
+		// Wait long enough for the reputation aggregation delay to fire.
+		futures_timer::Delay::new(Duration::from_millis(60)).await;
+
+		assert_matches!(
+			virtual_overseer.recv().await,
+			AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Batch(reps))) => {
+				let mut expected = HashMap::new();
+				expected.insert(peer_a, COST_UNEXPECTED_STATEMENT_CLUSTER_REJECTED.cost_or_benefit());
+				assert_eq!(expected, reps);
+			}
+		);
+
+		virtual_overseer
+	};
+
+	futures::pin_mut!(test_fut);
+	futures::pin_mut!(subsystem);
+	futures::executor::block_on(future::join(
+		async move {
+			let mut virtual_overseer = test_fut.await;
+			// Ensure we have handled all responses.
+			if let Ok(Some(msg)) = virtual_overseer.rx.try_next() {
+				panic!("Did not handle all responses: {:?}", msg);
+			}
+			// Conclude.
+			virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+		},
+		subsystem,
+	));
+}
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
index 0133d9e219f6eafacf0d1f73f6c261eb550a9af2..494e2a7f5dbf57a9ed7b9d659f23367d6626f6c7 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs
@@ -30,7 +30,6 @@ fn backed_candidate_leads_to_advertisement() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -240,7 +239,6 @@ fn received_advertisement_before_confirmation_leads_to_request() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -413,7 +411,6 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -595,7 +592,6 @@ fn receive_ack_for_unconfirmed_candidate() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -657,7 +653,6 @@ fn received_acknowledgements_for_locally_confirmed() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -820,7 +815,6 @@ fn received_acknowledgements_for_externally_confirmed() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -956,7 +950,6 @@ fn received_advertisement_after_confirmation_before_backing() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -1135,7 +1128,6 @@ fn additional_statements_are_shared_after_manifest_exchange() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -1423,7 +1415,6 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -1637,7 +1628,6 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -1849,7 +1839,6 @@ fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2058,7 +2047,6 @@ fn advertisements_rejected_from_incorrect_peers() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2195,7 +2183,6 @@ fn manifest_rejected_with_unknown_relay_parent() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2293,7 +2280,6 @@ fn manifest_rejected_when_not_a_validator() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::None,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2387,7 +2373,6 @@ fn manifest_rejected_when_group_does_not_match_para() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2486,7 +2471,6 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2677,7 +2661,6 @@ fn inactive_local_participates_in_grid() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::InactiveValidator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
index 46b72f5adac9859f05f3ee9733a9b960b991715b..5a9b8efa2a13b60144a51a8fb9b26ec5358d8e32 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
@@ -24,7 +24,7 @@ use polkadot_node_network_protocol::{
 	v2::{BackedCandidateAcknowledgement, BackedCandidateManifest},
 	view, ObservedRole,
 };
-use polkadot_node_primitives::Statement;
+use polkadot_node_primitives::{Statement, StatementWithPVD};
 use polkadot_node_subsystem::messages::{
 	network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, HypotheticalCandidate,
 	HypotheticalMembership, NetworkBridgeEvent, ProspectiveParachainsMessage, ReportPeerMessage,
@@ -33,9 +33,9 @@ use polkadot_node_subsystem::messages::{
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::{
-	vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, AssignmentPair,
-	AsyncBackingParams, Block, BlockNumber, GroupRotationInfo, HeadData, Header, IndexedVec,
-	PersistedValidationData, SessionIndex, SessionInfo, ValidatorPair,
+	vstaging::CommittedCandidateReceiptV2 as CommittedCandidateReceipt, AssignmentPair, Block,
+	BlockNumber, GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData,
+	SessionIndex, SessionInfo, ValidatorPair, DEFAULT_SCHEDULING_LOOKAHEAD,
 };
 use sc_keystore::LocalKeystore;
 use sc_network::ProtocolName;
@@ -58,9 +58,6 @@ mod requests;
 type VirtualOverseer =
 	polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle<StatementDistributionMessage>;
 
-const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParams =
-	AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 };
-
 // Some deterministic genesis hash for req/res protocol names
 const GENESIS_HASH: Hash = Hash::repeat_byte(0xff);
 
@@ -81,7 +78,6 @@ struct TestConfig {
 	group_size: usize,
 	// whether the local node should be a validator
 	local_validator: LocalRole,
-	async_backing_params: Option<AsyncBackingParams>,
 	// allow v2 descriptors (feature bit)
 	allow_v2_descriptors: bool,
 }
@@ -187,23 +183,26 @@ impl TestState {
 	}
 
 	fn make_dummy_leaf(&self, relay_parent: Hash) -> TestLeaf {
-		self.make_dummy_leaf_with_multiple_cores_per_para(relay_parent, 1)
+		self.make_dummy_leaf_inner(relay_parent, 1, DEFAULT_SCHEDULING_LOOKAHEAD as usize)
 	}
 
-	fn make_dummy_leaf_with_multiple_cores_per_para(
+	fn make_dummy_leaf_inner(
 		&self,
 		relay_parent: Hash,
 		groups_for_first_para: usize,
+		scheduling_lookahead: usize,
 	) -> TestLeaf {
 		let mut cq = std::collections::BTreeMap::new();
 
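+		// Give each core `scheduling_lookahead` claim queue entries; this depth drives
+		// the per-core seconding limit in these tests.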
 		for i in 0..self.session_info.validator_groups.len() {
 			if i < groups_for_first_para {
-				cq.entry(CoreIndex(i as u32))
-					.or_insert_with(|| vec![ParaId::from(0u32), ParaId::from(0u32)].into());
+				cq.entry(CoreIndex(i as u32)).or_insert_with(|| {
+					std::iter::repeat(ParaId::from(0u32)).take(scheduling_lookahead).collect()
+				});
 			} else {
-				cq.entry(CoreIndex(i as u32))
-					.or_insert_with(|| vec![ParaId::from(i), ParaId::from(i)].into());
+				cq.entry(CoreIndex(i as u32)).or_insert_with(|| {
+					std::iter::repeat(ParaId::from(i)).take(scheduling_lookahead).collect()
+				});
 			};
 		}
 
@@ -229,6 +228,26 @@ impl TestState {
 		}
 	}
 
+	fn make_dummy_leaf_with_scheduling_lookahead(
+		&self,
+		relay_parent: Hash,
+		scheduling_lookahead: usize,
+	) -> TestLeaf {
+		self.make_dummy_leaf_inner(relay_parent, 1, scheduling_lookahead)
+	}
+
+	fn make_dummy_leaf_with_multiple_cores_per_para(
+		&self,
+		relay_parent: Hash,
+		groups_for_first_para: usize,
+	) -> TestLeaf {
+		self.make_dummy_leaf_inner(
+			relay_parent,
+			groups_for_first_para,
+			DEFAULT_SCHEDULING_LOOKAHEAD as usize,
+		)
+	}
+
 	fn make_dummy_leaf_with_disabled_validators(
 		&self,
 		relay_parent: Hash,
@@ -588,15 +607,6 @@ async fn handle_leaf_activation(
 		claim_queue,
 	} = leaf;
 
-	assert_matches!(
-		virtual_overseer.recv().await,
-		AllMessages::RuntimeApi(
-			RuntimeApiMessage::Request(parent, RuntimeApiRequest::AsyncBackingParams(tx))
-		) if parent == *hash => {
-			tx.send(Ok(test_state.config.async_backing_params.unwrap_or(DEFAULT_ASYNC_BACKING_PARAMETERS))).unwrap();
-		}
-	);
-
 	let header = Header {
 		parent_hash: *parent_hash,
 		number: *number,
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
index fc880c1d9a836d9ece1475c687d81b1e5b8fea37..91df8e0a2f8c3417a23452f80aa205526595c775 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
@@ -38,7 +38,6 @@ fn cluster_peer_allowed_to_send_incomplete_statements(#[case] allow_v2_descripto
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors,
 	};
 
@@ -202,14 +201,12 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: Some(AsyncBackingParams {
-			// Makes `seconding_limit: 2` (easier to hit the limit).
-			max_candidate_depth: 1,
-			allowed_ancestry_len: 3,
-		}),
 		allow_v2_descriptors: false,
 	};
 
+	// Use a `scheduling_lookahead` of 2 to restrict the per-core seconding limit to 2
+	// (easier to hit the limit).
+	let scheduling_lookahead = 2;
+
 	let relay_parent = Hash::repeat_byte(1);
 	let peer_c = PeerId::random();
 	let peer_d = PeerId::random();
@@ -222,7 +219,8 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() {
 		let other_group = next_group_index(local_group_index, validator_count, group_size);
 		let other_para = ParaId::from(other_group.0);
 
-		let test_leaf = state.make_dummy_leaf(relay_parent);
+		let test_leaf =
+			state.make_dummy_leaf_with_scheduling_lookahead(relay_parent, scheduling_lookahead);
 
 		let (candidate_1, pvd_1) = make_candidate(
 			relay_parent,
@@ -482,7 +480,6 @@ fn peer_reported_for_not_enough_statements() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -670,7 +667,6 @@ fn peer_reported_for_duplicate_statements() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -824,7 +820,6 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -956,7 +951,6 @@ fn peer_reported_for_invalid_v2_descriptor() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: true,
 	};
 
@@ -988,7 +982,6 @@ fn peer_reported_for_invalid_v2_descriptor() {
 		let other_group_validators = state.group_validators(local_group_index, true);
 		let v_a = other_group_validators[0];
 		let v_b = other_group_validators[1];
-		let v_c = other_group_validators[1];
 
 		// peer A is in group, has relay parent in view.
 		// peer B is in group, has no relay parent in view.
@@ -997,14 +990,14 @@ fn peer_reported_for_invalid_v2_descriptor() {
 			connect_peer(
 				&mut overseer,
 				peer_a.clone(),
-				Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()),
+				Some(vec![state.discovery_id(v_a)].into_iter().collect()),
 			)
 			.await;
 
 			connect_peer(
 				&mut overseer,
 				peer_b.clone(),
-				Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()),
+				Some(vec![state.discovery_id(v_b)].into_iter().collect()),
 			)
 			.await;
 
@@ -1177,11 +1170,17 @@ fn peer_reported_for_invalid_v2_descriptor() {
 				.clone();
 			let statements = vec![b_seconded_invalid.clone()];
 
+			// `v_a` has exhausted its seconded statements (the default limit of 3).
+			let mut statement_filter = StatementFilter::blank(group_size);
+			statement_filter
+				.seconded_in_group
+				.set(state.index_within_group(local_group_index, v_a).unwrap(), true);
+
 			handle_sent_request(
 				&mut overseer,
 				peer_a,
 				candidate_hash,
-				StatementFilter::blank(group_size),
+				statement_filter,
 				candidate.clone(),
 				pvd.clone(),
 				statements,
@@ -1213,7 +1212,7 @@ fn peer_reported_for_invalid_v2_descriptor() {
 					assert_eq!(peers, vec![peer_a.clone()]);
 					assert_eq!(r, relay_parent);
 					assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash));
-					assert_eq!(s.unchecked_validator_index(), v_c);
+					assert_eq!(s.unchecked_validator_index(), v_b);
 				}
 			);
 
@@ -1234,7 +1233,6 @@ fn v2_descriptors_filtered(#[case] allow_v2_descriptors: bool) {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors,
 	};
 
@@ -1365,7 +1363,6 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -1496,7 +1493,6 @@ fn disabled_validators_added_to_unwanted_mask() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -1663,7 +1659,6 @@ fn disabling_works_from_relay_parent_not_the_latest_state() {
 		validator_count: 20,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -1863,7 +1858,6 @@ fn local_node_sanity_checks_incoming_requests() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2065,7 +2059,6 @@ fn local_node_checks_that_peer_can_request_before_responding() {
 		validator_count: 20,
 		group_size: 3,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2265,7 +2258,6 @@ fn local_node_respects_statement_mask() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
@@ -2508,7 +2500,6 @@ fn should_delay_before_retrying_dropped_requests() {
 		validator_count,
 		group_size,
 		local_validator: LocalRole::Validator,
-		async_backing_params: None,
 		allow_v2_descriptors: false,
 	};
 
diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs
index 3881ddbcc9046dfb1f805c82a46c824d169e19f6..17470d74577d2147bb4faf2461aa2e1faa85ffae 100644
--- a/polkadot/node/overseer/src/lib.rs
+++ b/polkadot/node/overseer/src/lib.rs
@@ -533,7 +533,6 @@ pub struct Overseer<SupportsParachains> {
 	#[subsystem(ProvisionerMessage, sends: [
 		RuntimeApiMessage,
 		CandidateBackingMessage,
-		ChainApiMessage,
 		DisputeCoordinatorMessage,
 		ProspectiveParachainsMessage,
 	])]
diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs
index e4ea6efeaac22bacf78074b3ba21c53fa160ff37..a90570a149b52807d1da3c5dc47a13e7d4d1fbdd 100644
--- a/polkadot/node/service/src/overseer.rs
+++ b/polkadot/node/service/src/overseer.rs
@@ -600,7 +600,7 @@ pub fn collator_overseer_builder<Spawner, RuntimeClient>(
 		network_service,
 		sync_service,
 		authority_discovery_service,
-		collation_req_v1_receiver,
+		collation_req_v1_receiver: _,
 		collation_req_v2_receiver,
 		available_data_req_receiver,
 		registry,
@@ -703,7 +703,6 @@ where
 				IsParachainNode::Collator(collator_pair) => ProtocolSide::Collator {
 					peer_id: network_service.local_peer_id(),
 					collator_pair,
-					request_receiver_v1: collation_req_v1_receiver,
 					request_receiver_v2: collation_req_v2_receiver,
 					metrics: Metrics::register(registry)?,
 				},
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index 8a3b91b3ec741baa9003b0c637567b8f79e614d7..4d4fc89a6addc10866ff7c2b00af75c7a28b2ae3 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -775,6 +775,9 @@ pub enum RuntimeApiRequest {
 	/// Get the backing constraints for a particular parachain.
 	/// `V12`
 	BackingConstraints(ParaId, RuntimeApiSender<Option<Constraints>>),
+	/// Get the lookahead from the scheduler params.
+	/// `V12`
+	SchedulingLookahead(SessionIndex, RuntimeApiSender<u32>),
 }
 
 impl RuntimeApiRequest {
@@ -818,6 +821,9 @@ impl RuntimeApiRequest {
 
 	/// `backing_constraints`
 	pub const CONSTRAINTS_RUNTIME_REQUIREMENT: u32 = 12;
+
+	/// `SchedulingLookahead`
+	pub const SCHEDULING_LOOKAHEAD_RUNTIME_REQUIREMENT: u32 = 12;
 }
 
 /// A message to the Runtime API subsystem.
@@ -852,9 +858,6 @@ pub enum StatementDistributionMessage {
 pub enum ProvisionableData {
 	/// This bitfield indicates the availability of various candidate blocks.
 	Bitfield(Hash, SignedAvailabilityBitfield),
-	/// The Candidate Backing subsystem believes that this candidate is valid, pending
-	/// availability.
-	BackedCandidate(CandidateReceipt),
 	/// Misbehavior reports are self-contained proofs of validator misbehavior.
 	MisbehaviorReport(Hash, ValidatorIndex, Misbehavior),
 	/// Disputes trigger a broad dispute resolution process.
diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs
index 018b52bedcd24fd57fbddaf55c769a99db4a8765..7e3849c20694dc2343bcd32e644b79af03ac761e 100644
--- a/polkadot/node/subsystem-types/src/runtime_client.rs
+++ b/polkadot/node/subsystem-types/src/runtime_client.rs
@@ -241,23 +241,18 @@ pub trait RuntimeApiSubsystemClient {
 	/***** Added in v3 **** */
 
 	/// Returns all onchain disputes.
-	/// This is a staging method! Do not use on production runtimes!
 	async fn disputes(
 		&self,
 		at: Hash,
 	) -> Result<Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>, ApiError>;
 
 	/// Returns a list of validators that lost a past session dispute and need to be slashed.
-	///
-	/// WARNING: This is a staging method! Do not use on production runtimes!
 	async fn unapplied_slashes(
 		&self,
 		at: Hash,
 	) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>, ApiError>;
 
 	/// Returns a merkle proof of a validator session key in a past session.
-	///
-	/// WARNING: This is a staging method! Do not use on production runtimes!
 	async fn key_ownership_proof(
 		&self,
 		at: Hash,
@@ -266,8 +261,6 @@ pub trait RuntimeApiSubsystemClient {
 
 	/// Submits an unsigned extrinsic to slash validators who lost a dispute about
 	/// a candidate of a past session.
-	///
-	/// WARNING: This is a staging method! Do not use on production runtimes!
 	async fn submit_report_dispute_lost(
 		&self,
 		at: Hash,
@@ -356,6 +349,10 @@ pub trait RuntimeApiSubsystemClient {
 		at: Hash,
 		para_id: Id,
 	) -> Result<Option<Constraints>, ApiError>;
+
+	// === v12 ===
+	/// Fetch the scheduling lookahead value.
+	async fn scheduling_lookahead(&self, at: Hash) -> Result<u32, ApiError>;
 }
 
 /// Default implementation of [`RuntimeApiSubsystemClient`] using the client.
@@ -641,6 +638,10 @@ where
 	) -> Result<Option<Constraints>, ApiError> {
 		self.client.runtime_api().backing_constraints(at, para_id)
 	}
+
+	async fn scheduling_lookahead(&self, at: Hash) -> Result<u32, ApiError> {
+		self.client.runtime_api().scheduling_lookahead(at)
+	}
 }
 
 impl<Client, Block> HeaderBackend<Block> for DefaultSubsystemClient<Client>
diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs
index 67f5dad518e18ed71ae540db804a71a6681b0f9a..d8e242109955a1ef778c88d6f5161dd05b4feb8a 100644
--- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs
+++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs
@@ -20,14 +20,14 @@ use polkadot_node_subsystem::{
 	messages::{ChainApiMessage, ProspectiveParachainsMessage, RuntimeApiMessage},
 	SubsystemSender,
 };
-use polkadot_primitives::{AsyncBackingParams, BlockNumber, Hash, Id as ParaId};
+use polkadot_primitives::{BlockNumber, Hash, Id as ParaId};
 
 use std::collections::{HashMap, HashSet};
 
 use crate::{
 	inclusion_emulator::RelayChainBlockInfo,
-	request_async_backing_params, request_session_index_for_child,
-	runtime::{self, recv_runtime},
+	request_session_index_for_child,
+	runtime::{self, fetch_scheduling_lookahead, recv_runtime},
 	LOG_TARGET,
 };
 
@@ -149,6 +149,11 @@ impl View {
 		self.leaves.keys()
 	}
 
+	/// Check if the given block hash is an active leaf of the current view.
+	pub fn contains_leaf(&self, leaf_hash: &Hash) -> bool {
+		self.leaves.contains_key(leaf_hash)
+	}
+
 	/// Activate a leaf in the view.
 	/// This will request the minimum relay parents the leaf and will load headers in the
 	/// ancestry of the leaf as needed. These are the 'implicit ancestors' of the leaf.
@@ -590,22 +595,22 @@ where
 		+ SubsystemSender<RuntimeApiMessage>
 		+ SubsystemSender<ChainApiMessage>,
 {
-	let AsyncBackingParams { allowed_ancestry_len, .. } =
-		recv_runtime(request_async_backing_params(leaf_hash, sender).await).await?;
-
 	// Fetch the session of the leaf. We must make sure that we stop at the ancestor which has a
 	// different session index.
 	let required_session =
 		recv_runtime(request_session_index_for_child(leaf_hash, sender).await).await?;
 
+	let scheduling_lookahead =
+		fetch_scheduling_lookahead(leaf_hash, required_session, sender).await?;
+
 	let mut min = leaf_number;
 
-	// Fetch the ancestors, up to allowed_ancestry_len.
+	// Fetch the ancestors, up to (scheduling_lookahead - 1).
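+	// The leaf itself accounts for one slot of the lookahead, hence the `- 1`.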
 	let (tx, rx) = oneshot::channel();
 	sender
 		.send_message(ChainApiMessage::Ancestors {
 			hash: leaf_hash,
-			k: allowed_ancestry_len as usize,
+			k: scheduling_lookahead.saturating_sub(1) as usize,
 			response_channel: tx,
 		})
 		.await;
@@ -642,7 +647,7 @@ mod tests {
 		make_subsystem_context, TestSubsystemContextHandle,
 	};
 	use polkadot_overseer::SubsystemContext;
-	use polkadot_primitives::{AsyncBackingParams, Header};
+	use polkadot_primitives::Header;
 	use sp_core::testing::TaskExecutor;
 	use std::time::Duration;
 
@@ -743,23 +748,24 @@ mod tests {
 		);
 	}
 
-	async fn assert_async_backing_params_request(
+	async fn assert_scheduling_lookahead_request(
 		virtual_overseer: &mut VirtualOverseer,
 		leaf: Hash,
-		params: AsyncBackingParams,
+		lookahead: u32,
 	) {
 		assert_matches!(
 			overseer_recv(virtual_overseer).await,
 			AllMessages::RuntimeApi(
 				RuntimeApiMessage::Request(
 					leaf_hash,
-					RuntimeApiRequest::AsyncBackingParams(
+					RuntimeApiRequest::SchedulingLookahead(
+						_,
 						tx
 					)
 				)
 			) => {
 				assert_eq!(leaf, leaf_hash, "received unexpected leaf hash");
-				tx.send(Ok(params)).unwrap();
+				tx.send(Ok(lookahead)).unwrap();
 			}
 		);
 	}
@@ -946,18 +952,11 @@ mod tests {
 		let overseer_fut = async {
 			assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[leaf_idx..]).await;
 
-			assert_async_backing_params_request(
-				&mut ctx_handle,
-				*leaf,
-				AsyncBackingParams {
-					max_candidate_depth: 0,
-					allowed_ancestry_len: PARA_A_MIN_PARENT,
-				},
-			)
-			.await;
-
 			assert_session_index_request(&mut ctx_handle, *leaf, current_session).await;
 
+			assert_scheduling_lookahead_request(&mut ctx_handle, *leaf, PARA_A_MIN_PARENT + 1)
+				.await;
+
 			assert_ancestors_request(
 				&mut ctx_handle,
 				*leaf,
@@ -1020,18 +1019,11 @@ mod tests {
 		let overseer_fut = async {
 			assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks[leaf_idx..]).await;
 
-			assert_async_backing_params_request(
-				&mut ctx_handle,
-				*leaf,
-				AsyncBackingParams {
-					max_candidate_depth: 0,
-					allowed_ancestry_len: blocks.len() as u32,
-				},
-			)
-			.await;
-
 			assert_session_index_request(&mut ctx_handle, *leaf, current_session).await;
 
+			assert_scheduling_lookahead_request(&mut ctx_handle, *leaf, blocks.len() as u32 + 1)
+				.await;
+
 			assert_ancestors_request(
 				&mut ctx_handle,
 				*leaf,
diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs
index d84951ae1366518c5a7037dcbe227c9ec574ea66..dd843cfb01fa9030511ddd426417f85433d8802b 100644
--- a/polkadot/node/subsystem-util/src/runtime/mod.rs
+++ b/polkadot/node/subsystem-util/src/runtime/mod.rs
@@ -33,21 +33,20 @@ use polkadot_primitives::{
 	node_features::FeatureIndex,
 	slashing,
 	vstaging::{CandidateEvent, CoreState, OccupiedCore, ScrapedOnChainVotes},
-	AsyncBackingParams, CandidateHash, CoreIndex, EncodeAs, ExecutorParams, GroupIndex,
-	GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, SessionIndex, SessionInfo,
-	Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId,
-	ValidatorIndex, LEGACY_MIN_BACKING_VOTES,
+	CandidateHash, CoreIndex, EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash,
+	Id as ParaId, IndexedVec, NodeFeatures, SessionIndex, SessionInfo, Signed, SigningContext,
+	UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
+	DEFAULT_SCHEDULING_LOOKAHEAD, LEGACY_MIN_BACKING_VOTES,
 };
 
 use std::collections::{BTreeMap, VecDeque};
 
 use crate::{
-	has_required_runtime, request_async_backing_params, request_availability_cores,
-	request_candidate_events, request_claim_queue, request_disabled_validators,
-	request_from_runtime, request_key_ownership_proof, request_on_chain_votes,
-	request_session_executor_params, request_session_index_for_child, request_session_info,
-	request_submit_report_dispute_lost, request_unapplied_slashes, request_validation_code_by_hash,
-	request_validator_groups,
+	has_required_runtime, request_availability_cores, request_candidate_events,
+	request_claim_queue, request_disabled_validators, request_from_runtime,
+	request_key_ownership_proof, request_on_chain_votes, request_session_executor_params,
+	request_session_index_for_child, request_session_info, request_submit_report_dispute_lost,
+	request_unapplied_slashes, request_validation_code_by_hash, request_validator_groups,
 };
 
 /// Errors that can happen on runtime fetches.
@@ -469,64 +468,6 @@ where
 	.await
 }
 
-/// Prospective parachains mode of a relay parent. Defined by
-/// the Runtime API version.
-///
-/// Needed for the period of transition to asynchronous backing.
-#[derive(Debug, Copy, Clone)]
-pub enum ProspectiveParachainsMode {
-	/// Runtime API without support of `async_backing_params`: no prospective parachains.
-	Disabled,
-	/// v6 runtime API: prospective parachains.
-	Enabled {
-		/// The maximum number of para blocks between the para head in a relay parent
-		/// and a new candidate. Restricts nodes from building arbitrary long chains
-		/// and spamming other validators.
-		max_candidate_depth: usize,
-		/// How many ancestors of a relay parent are allowed to build candidates on top
-		/// of.
-		allowed_ancestry_len: usize,
-	},
-}
-
-impl ProspectiveParachainsMode {
-	/// Returns `true` if mode is enabled, `false` otherwise.
-	pub fn is_enabled(&self) -> bool {
-		matches!(self, ProspectiveParachainsMode::Enabled { .. })
-	}
-}
-
-/// Requests prospective parachains mode for a given relay parent based on
-/// the Runtime API version.
-pub async fn prospective_parachains_mode<Sender>(
-	sender: &mut Sender,
-	relay_parent: Hash,
-) -> Result<ProspectiveParachainsMode>
-where
-	Sender: SubsystemSender<RuntimeApiMessage>,
-{
-	let result = recv_runtime(request_async_backing_params(relay_parent, sender).await).await;
-
-	if let Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { runtime_api_name })) =
-		&result
-	{
-		gum::trace!(
-			target: LOG_TARGET,
-			?relay_parent,
-			"Prospective parachains are disabled, {} is not supported by the current Runtime API",
-			runtime_api_name,
-		);
-
-		Ok(ProspectiveParachainsMode::Disabled)
-	} else {
-		let AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = result?;
-		Ok(ProspectiveParachainsMode::Enabled {
-			max_candidate_depth: max_candidate_depth as _,
-			allowed_ancestry_len: allowed_ancestry_len as _,
-		})
-	}
-}
-
 /// Request the min backing votes value.
 /// Prior to runtime API version 6, just return a hardcoded constant.
 pub async fn request_min_backing_votes(
@@ -655,27 +596,44 @@ pub async fn get_disabled_validators_with_fallback<Sender: SubsystemSender<Runti
 	Ok(disabled_validators)
 }
 
-/// Checks if the runtime supports `request_claim_queue` and attempts to fetch the claim queue.
-/// Returns `ClaimQueueSnapshot` or `None` if claim queue API is not supported by runtime.
-/// Any specific `RuntimeApiError` is bubbled up to the caller.
+/// Fetch the claim queue and wrap it into a helpful `ClaimQueueSnapshot`.
 pub async fn fetch_claim_queue(
 	sender: &mut impl SubsystemSender<RuntimeApiMessage>,
 	relay_parent: Hash,
-) -> Result<Option<ClaimQueueSnapshot>> {
-	if has_required_runtime(
-		sender,
-		relay_parent,
-		RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT,
+) -> Result<ClaimQueueSnapshot> {
+	let cq = request_claim_queue(relay_parent, sender)
+		.await
+		.await
+		.map_err(Error::RuntimeRequestCanceled)??;
+
+	Ok(cq.into())
+}
+
+/// Fetch the scheduling lookahead from the runtime.
+/// Falls back to `DEFAULT_SCHEDULING_LOOKAHEAD` if the runtime does not support this API.
+pub async fn fetch_scheduling_lookahead(
+	parent: Hash,
+	session_index: SessionIndex,
+	sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
+) -> Result<u32> {
+	let res = recv_runtime(
+		request_from_runtime(parent, sender, |tx| {
+			RuntimeApiRequest::SchedulingLookahead(session_index, tx)
+		})
+		.await,
 	)
-	.await
-	{
-		let res = request_claim_queue(relay_parent, sender)
-			.await
-			.await
-			.map_err(Error::RuntimeRequestCanceled)??;
-		Ok(Some(res.into()))
+	.await;
+
+	if let Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) = res {
+		gum::trace!(
+			target: LOG_TARGET,
+			?parent,
+			"Querying the scheduling lookahead from the runtime is not supported by the current Runtime API, falling back to default value of {}",
+			DEFAULT_SCHEDULING_LOOKAHEAD
+		);
+
+		Ok(DEFAULT_SCHEDULING_LOOKAHEAD)
 	} else {
-		gum::trace!(target: LOG_TARGET, "Runtime doesn't support `request_claim_queue`");
-		Ok(None)
+		res
 	}
 }
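
For orientation, a minimal sketch of how a subsystem might combine the two helpers above. The `Hash`, `SessionIndex`, `Result`, and sender types are assumed to be the same ones used by the signatures in this hunk; the function name is illustrative only.

```rust
/// Sketch only: fetch the claim queue together with the scheduling lookahead.
async fn claim_queue_and_lookahead(
	sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
	relay_parent: Hash,
	session_index: SessionIndex,
) -> Result<(ClaimQueueSnapshot, u32)> {
	// `fetch_claim_queue` now errors instead of returning `None`: all
	// supported runtimes are expected to expose the claim queue API.
	let claim_queue = fetch_claim_queue(sender, relay_parent).await?;
	// `fetch_scheduling_lookahead` already maps `NotSupported` to
	// `DEFAULT_SCHEDULING_LOOKAHEAD`, so no extra fallback is needed here.
	let lookahead = fetch_scheduling_lookahead(relay_parent, session_index, sender).await?;
	Ok((claim_queue, lookahead))
}
```
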
diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs
index 493f9fb5ba92ee66320ad6ead60e5bb1111213b9..361b66cf27f677fc2e7c6d52568ee65a799b8fdb 100644
--- a/polkadot/primitives/src/lib.rs
+++ b/polkadot/primitives/src/lib.rs
@@ -60,8 +60,8 @@ pub use v8::{
 	UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead,
 	UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode,
 	ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation,
-	ValidityError, ASSIGNMENT_KEY_TYPE_ID, LEGACY_MIN_BACKING_VOTES, LOWEST_PUBLIC_ID,
-	MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE,
+	ValidityError, ASSIGNMENT_KEY_TYPE_ID, DEFAULT_SCHEDULING_LOOKAHEAD, LEGACY_MIN_BACKING_VOTES,
+	LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, MIN_CODE_SIZE,
 	ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER,
 	PARACHAIN_KEY_TYPE_ID,
 };
diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs
index df1dfbac4001383eeacd3fdc84084960d9e2c465..e0516a2f77e423345776df303171b2df7c385f89 100644
--- a/polkadot/primitives/src/runtime_api.rs
+++ b/polkadot/primitives/src/runtime_api.rs
@@ -303,5 +303,10 @@ sp_api::decl_runtime_apis! {
 		/// block.
 		#[api_version(12)]
 		fn backing_constraints(para_id: ppp::Id) -> Option<Constraints>;
+
+		/***** Added in v12 *****/
+		/// Retrieve the scheduling lookahead.
+		#[api_version(12)]
+		fn scheduling_lookahead() -> u32;
 	}
 }
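
On the node side, `scheduling_lookahead` is only callable against runtimes advertising `ParachainHost` v12 or newer, so callers are expected to version-gate the request. A hedged sketch using plain `sp_api` (the helper name is made up, and the hardcoded `12` mirrors the `#[api_version(12)]` annotation above):

```rust
use polkadot_primitives::{runtime_api::ParachainHost, DEFAULT_SCHEDULING_LOOKAHEAD};
use sp_api::{ApiExt, BlockT, ProvideRuntimeApi};

/// Illustrative only: query the scheduling lookahead, falling back to the
/// default on runtimes that predate ParachainHost v12.
fn lookahead_or_default<Block, Client>(client: &Client, at: Block::Hash) -> u32
where
	Block: BlockT,
	Client: ProvideRuntimeApi<Block>,
	Client::Api: ParachainHost<Block>,
{
	let api = client.runtime_api();
	match api.api_version::<dyn ParachainHost<Block>>(at) {
		Ok(Some(version)) if version >= 12 =>
			api.scheduling_lookahead(at).unwrap_or(DEFAULT_SCHEDULING_LOOKAHEAD),
		_ => DEFAULT_SCHEDULING_LOOKAHEAD,
	}
}
```
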
diff --git a/polkadot/primitives/src/v8/mod.rs b/polkadot/primitives/src/v8/mod.rs
index 7fc4c5b5c3f1d4a21b3addd50d396c50e95bd77d..93bb5ef23667288eb8a4a79e8589a39f32c742fc 100644
--- a/polkadot/primitives/src/v8/mod.rs
+++ b/polkadot/primitives/src/v8/mod.rs
@@ -444,6 +444,9 @@ pub const ON_DEMAND_MAX_QUEUE_MAX_SIZE: u32 = 1_000_000_000;
 /// prior to v9 configuration migration.
 pub const LEGACY_MIN_BACKING_VOTES: u32 = 2;
 
+/// Default value for `SchedulerParams.lookahead`.
+pub const DEFAULT_SCHEDULING_LOOKAHEAD: u32 = 3;
+
 // The public key of a keypair used by a validator for determining assignments
 /// to approve included parachain candidates.
 mod assignment_app {
@@ -2132,11 +2135,13 @@ impl<BlockNumber: Default + From<u32>> Default for SchedulerParams<BlockNumber>
 }
 
 #[cfg(test)]
+/// Test helpers.
 pub mod tests {
 	use super::*;
 	use bitvec::bitvec;
 	use sp_core::sr25519;
 
+	/// Create a dummy committed candidate receipt.
 	pub fn dummy_committed_candidate_receipt() -> CommittedCandidateReceipt {
 		let zeros = Hash::zero();
 
diff --git a/polkadot/primitives/test-helpers/src/lib.rs b/polkadot/primitives/test-helpers/src/lib.rs
index 1717dd5b0edae7ee3c2d67c0da8a04403e79972a..b7cdfa83e10d32c3d0beaf5975b9af61c1adccc8 100644
--- a/polkadot/primitives/test-helpers/src/lib.rs
+++ b/polkadot/primitives/test-helpers/src/lib.rs
@@ -381,22 +381,30 @@ pub struct TestCandidateBuilder {
 	pub pov_hash: Hash,
 	pub relay_parent: Hash,
 	pub commitments_hash: Hash,
+	pub core_index: CoreIndex,
 }
 
 impl std::default::Default for TestCandidateBuilder {
 	fn default() -> Self {
 		let zeros = Hash::zero();
-		Self { para_id: 0.into(), pov_hash: zeros, relay_parent: zeros, commitments_hash: zeros }
+		Self {
+			para_id: 0.into(),
+			pov_hash: zeros,
+			relay_parent: zeros,
+			commitments_hash: zeros,
+			core_index: CoreIndex(0),
+		}
 	}
 }
 
 impl TestCandidateBuilder {
 	/// Build a `CandidateReceipt`.
 	pub fn build(self) -> CandidateReceiptV2 {
-		let mut descriptor = dummy_candidate_descriptor(self.relay_parent);
-		descriptor.para_id = self.para_id;
-		descriptor.pov_hash = self.pov_hash;
-		CandidateReceipt { descriptor, commitments_hash: self.commitments_hash }.into()
+		let mut descriptor = dummy_candidate_descriptor_v2(self.relay_parent);
+		descriptor.set_para_id(self.para_id);
+		descriptor.set_pov_hash(self.pov_hash);
+		descriptor.set_core_index(self.core_index);
+		CandidateReceiptV2 { descriptor, commitments_hash: self.commitments_hash }
 	}
 }
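
A hypothetical test-side use of the extended builder, assuming `CandidateDescriptorV2::core_index()` exposes the stored value as an `Option` (the field values here are arbitrary):

```rust
use polkadot_primitives::CoreIndex;
use polkadot_primitives_test_helpers::TestCandidateBuilder;

#[test]
fn builder_records_core_index() {
	// Pin the dummy candidate to para 100 on core 1; all other fields default.
	let receipt = TestCandidateBuilder {
		para_id: 100.into(),
		core_index: CoreIndex(1),
		..Default::default()
	}
	.build();
	assert_eq!(receipt.descriptor.core_index(), Some(CoreIndex(1)));
}
```
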
 
diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs
index 4c1394fd1347395371a581619d56a70df1bc9c73..b3057c25d8563cb1de61216e80b1373000312ee5 100644
--- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs
+++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs
@@ -329,7 +329,7 @@ impl<T: Config> Pallet<T> {
 						})
 						.collect(),
 					parent_number,
-					config.async_backing_params.allowed_ancestry_len,
+					config.scheduler_params.lookahead,
 				);
 			});
 		}
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
index 3f2cb5771098750e965d5efaef0899bdb47ae438..133d4ca1877d2ba1b2900e10e9d886fc5bbe95fa 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/v11.rs
@@ -420,12 +420,12 @@ pub(crate) fn backing_constraints<T: initializer::Config>(
 	{
 		shared::migration::v0::AllowedRelayParents::<T>::get().hypothetical_earliest_block_number(
 			now,
-			config.async_backing_params.allowed_ancestry_len,
+			config.scheduler_params.lookahead.saturating_sub(1),
 		)
 	} else {
 		shared::AllowedRelayParents::<T>::get().hypothetical_earliest_block_number(
 			now,
-			config.async_backing_params.allowed_ancestry_len,
+			config.scheduler_params.lookahead.saturating_sub(1),
 		)
 	};
 
@@ -508,6 +508,7 @@ pub fn backing_state<T: initializer::Config>(
 }
 
 /// Implementation for `AsyncBackingParams` function from the runtime API
+#[deprecated = "AsyncBackingParams are going to be removed and ignored by relay chain validators, in favour of dynamically computed values based on the claim queue assignments"]
 pub fn async_backing_params<T: configuration::Config>() -> AsyncBackingParams {
 	configuration::ActiveConfig::<T>::get().async_backing_params
 }
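
The substitution in this hunk assumes the relationship `allowed_ancestry_len == lookahead - 1`: the lookahead counts the block currently being built on top of its allowed ancestors. A small worked check against the defaults (a sketch, not code from the PR):

```rust
use polkadot_primitives::DEFAULT_SCHEDULING_LOOKAHEAD;

/// Derive the allowed ancestry length from the scheduling lookahead.
fn allowed_ancestry_len(lookahead: u32) -> u32 {
	lookahead.saturating_sub(1)
}

#[test]
fn default_lookahead_matches_old_ancestry_default() {
	// The default lookahead of 3 reproduces the `allowed_ancestry_len` of 2
	// that production networks have been running with.
	assert_eq!(allowed_ancestry_len(DEFAULT_SCHEDULING_LOOKAHEAD), 2);
}
```
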
diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
index 52a9a9e122889d6c0be793320553b4f1719296e6..5a77af0d79731e5d06d1c5b7e5bf9f02b7a3485d 100644
--- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
+++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -45,3 +45,8 @@ pub fn backing_constraints<T: initializer::Config>(
 		future_validation_code: constraints_v11.future_validation_code,
 	})
 }
+
+/// Implementation for `scheduling_lookahead` function from the runtime API
+pub fn scheduling_lookahead<T: initializer::Config>() -> u32 {
+	configuration::ActiveConfig::<T>::get().scheduler_params.lookahead
+}
diff --git a/polkadot/runtime/parachains/src/shared.rs b/polkadot/runtime/parachains/src/shared.rs
index 473c1aba7a066d198f7bcdad8ad920cf7cf955e8..94b69e4ae4b4a4b89aaa35b0b7a044281f4ab1e8 100644
--- a/polkadot/runtime/parachains/src/shared.rs
+++ b/polkadot/runtime/parachains/src/shared.rs
@@ -96,13 +96,10 @@ impl<Hash: PartialEq + Copy, BlockNumber: AtLeast32BitUnsigned + Copy>
 
 		let claim_queue = transpose_claim_queue(claim_queue);
 
-		// + 1 for the most recent block, which is always allowed.
-		let buffer_size_limit = max_ancestry_len as usize + 1;
-
 		self.buffer.push_back(RelayParentInfo { relay_parent, state_root, claim_queue });
 
 		self.latest_number = number;
-		while self.buffer.len() > buffer_size_limit {
+		while self.buffer.len() > (max_ancestry_len as usize) {
 			let _ = self.buffer.pop_front();
 		}
 
@@ -295,7 +292,7 @@ impl<T: Config> Pallet<T> {
 		max_ancestry_len: u32,
 	) {
 		AllowedRelayParents::<T>::mutate(|tracker| {
-			tracker.update(relay_parent, state_root, claim_queue, number, max_ancestry_len)
+			tracker.update(relay_parent, state_root, claim_queue, number, max_ancestry_len + 1)
 		})
 	}
 }
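
The `+ 1` has moved from the tracker into its caller: `update` now treats its last argument as the total buffer capacity, so `add_allowed_relay_parent` passes `max_ancestry_len + 1` to retain the most recent relay parent plus `max_ancestry_len` ancestors. A standalone model of that pruning rule (illustrative, not PR code):

```rust
use std::collections::VecDeque;

/// Push a relay parent and prune the buffer down to `capacity` entries,
/// mirroring the loop in `update` above.
fn push_pruned(buffer: &mut VecDeque<u32>, relay_parent: u32, capacity: usize) {
	buffer.push_back(relay_parent);
	while buffer.len() > capacity {
		let _ = buffer.pop_front();
	}
}

#[test]
fn keeps_latest_block_plus_ancestors() {
	let max_ancestry_len = 3; // e.g. `scheduler_params.lookahead`
	let mut buffer = VecDeque::new();
	for block in 0..10u32 {
		push_pruned(&mut buffer, block, max_ancestry_len + 1);
	}
	// The most recent block plus `max_ancestry_len` ancestors survive.
	assert_eq!(buffer, VecDeque::from(vec![6, 7, 8, 9]));
}
```
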
diff --git a/polkadot/runtime/parachains/src/shared/tests.rs b/polkadot/runtime/parachains/src/shared/tests.rs
index f7ea5148ce33417740f09b9e39b64f94b0a2c29b..a35945549e74261c37e4e28575a12e074f0c5a39 100644
--- a/polkadot/runtime/parachains/src/shared/tests.rs
+++ b/polkadot/runtime/parachains/src/shared/tests.rs
@@ -36,8 +36,8 @@ fn tracker_earliest_block_number() {
 
 	// Push a single block into the tracker, suppose max capacity is 1.
 	let max_ancestry_len = 0;
-	tracker.update(Hash::zero(), Hash::zero(), Default::default(), 0, max_ancestry_len);
-	assert_eq!(tracker.hypothetical_earliest_block_number(now, max_ancestry_len), now);
+	tracker.update(Hash::zero(), Hash::zero(), Default::default(), 0, max_ancestry_len + 1);
+	assert_eq!(tracker.hypothetical_earliest_block_number(now, max_ancestry_len as _), now);
 
 	// Test a greater capacity.
 	let max_ancestry_len = 4;
@@ -48,14 +48,14 @@ fn tracker_earliest_block_number() {
 			Hash::zero(),
 			Default::default(),
 			i,
-			max_ancestry_len,
+			max_ancestry_len + 1,
 		);
-		assert_eq!(tracker.hypothetical_earliest_block_number(i + 1, max_ancestry_len), 0);
+		assert_eq!(tracker.hypothetical_earliest_block_number(i + 1, max_ancestry_len as _), 0);
 	}
 
 	// Capacity exceeded.
 	tracker.update(Hash::zero(), Hash::zero(), Default::default(), now, max_ancestry_len);
-	assert_eq!(tracker.hypothetical_earliest_block_number(now + 1, max_ancestry_len), 1);
+	assert_eq!(tracker.hypothetical_earliest_block_number(now + 1, max_ancestry_len as _), 1);
 }
 
 #[test]
@@ -67,7 +67,7 @@ fn tracker_claim_queue_transpose() {
 	claim_queue.insert(CoreIndex(1), vec![Id::from(0), Id::from(0), Id::from(100)].into());
 	claim_queue.insert(CoreIndex(2), vec![Id::from(1), Id::from(2), Id::from(100)].into());
 
-	tracker.update(Hash::zero(), Hash::zero(), claim_queue, 1u32, 3u32);
+	tracker.update(Hash::zero(), Hash::zero(), claim_queue, 1u32, 4);
 
 	let (info, _block_num) = tracker.acquire_info(Hash::zero(), None).unwrap();
 	assert_eq!(
@@ -120,14 +120,20 @@ fn tracker_acquire_info() {
 	];
 
 	let (relay_parent, state_root) = blocks[0];
-	tracker.update(relay_parent, state_root, Default::default(), 0, max_ancestry_len);
+	tracker.update(relay_parent, state_root, Default::default(), 0, max_ancestry_len + 1);
 	assert_matches!(
 		tracker.acquire_info(relay_parent, None),
 		Some((s, b)) if s.state_root == state_root && b == 0
 	);
 
 	// Try to push a duplicate. Should be ignored.
-	tracker.update(relay_parent, Hash::repeat_byte(13), Default::default(), 0, max_ancestry_len);
+	tracker.update(
+		relay_parent,
+		Hash::repeat_byte(13),
+		Default::default(),
+		0,
+		max_ancestry_len + 1,
+	);
 	assert_eq!(tracker.buffer.len(), 1);
 	assert_matches!(
 		tracker.acquire_info(relay_parent, None),
@@ -135,9 +141,9 @@ fn tracker_acquire_info() {
 	);
 
 	let (relay_parent, state_root) = blocks[1];
-	tracker.update(relay_parent, state_root, Default::default(), 1u32, max_ancestry_len);
+	tracker.update(relay_parent, state_root, Default::default(), 1u32, max_ancestry_len + 1);
 	let (relay_parent, state_root) = blocks[2];
-	tracker.update(relay_parent, state_root, Default::default(), 2u32, max_ancestry_len);
+	tracker.update(relay_parent, state_root, Default::default(), 2u32, max_ancestry_len + 1);
 	for (block_num, (rp, state_root)) in blocks.iter().enumerate().take(2) {
 		assert_matches!(
 			tracker.acquire_info(*rp, None),
diff --git a/polkadot/runtime/rococo/src/genesis_config_presets.rs b/polkadot/runtime/rococo/src/genesis_config_presets.rs
index 83bd1fbbc8faf42e4b9c0579e937628293d7a17b..80be075bea2566d95de7ee7e97f90bbb788ab9d7 100644
--- a/polkadot/runtime/rococo/src/genesis_config_presets.rs
+++ b/polkadot/runtime/rococo/src/genesis_config_presets.rs
@@ -125,8 +125,8 @@ fn default_parachains_host_configuration(
 		zeroth_delay_tranche_width: 0,
 		minimum_validation_upgrade_delay: 5,
 		async_backing_params: AsyncBackingParams {
-			max_candidate_depth: 3,
-			allowed_ancestry_len: 2,
+			max_candidate_depth: 0,
+			allowed_ancestry_len: 0,
 		},
 		node_features: bitvec::vec::BitVec::from_element(
 			1u8 << (FeatureIndex::ElasticScalingMVP as usize) |
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index f165091beda4790b55c8ed57686bf076e754f675..f7716e8b7d9cfa92d19d1f738649b358e72e7b20 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -79,7 +79,7 @@ use polkadot_runtime_parachains::{
 	origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent,
 	runtime_api_impl::{
-		v11 as parachains_runtime_api_impl, vstaging as parachains_runtime_vstaging_api_impl,
+		v11 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl,
 	},
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
@@ -2129,6 +2129,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn async_backing_params() -> polkadot_primitives::AsyncBackingParams {
+			#[allow(deprecated)]
 			parachains_runtime_api_impl::async_backing_params::<Runtime>()
 		}
 
@@ -2153,7 +2154,11 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn backing_constraints(para_id: ParaId) -> Option<Constraints> {
-			parachains_runtime_vstaging_api_impl::backing_constraints::<Runtime>(para_id)
+			parachains_staging_runtime_api_impl::backing_constraints::<Runtime>(para_id)
+		}
+
+		fn scheduling_lookahead() -> u32 {
+			parachains_staging_runtime_api_impl::scheduling_lookahead::<Runtime>()
 		}
 	}
 
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index 4126193388caef427992db4cd3e0c4964f682feb..1a19b637b798afbd03ac2cae5694407fdf05edb2 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -32,12 +32,14 @@ use pallet_transaction_payment::FungibleAdapter;
 
 use polkadot_runtime_parachains::{
 	assigner_coretime as parachains_assigner_coretime, configuration as parachains_configuration,
-	configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, coretime,
-	disputes as parachains_disputes, disputes::slashing as parachains_slashing,
+	configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio,
+	coretime, disputes as parachains_disputes,
+	disputes::slashing as parachains_slashing,
 	dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion,
 	initializer as parachains_initializer, on_demand as parachains_on_demand,
 	origin as parachains_origin, paras as parachains_paras,
-	paras_inherent as parachains_paras_inherent, runtime_api_impl::v11 as runtime_impl,
+	paras_inherent as parachains_paras_inherent,
+	runtime_api_impl::{v11 as runtime_impl, vstaging as staging_runtime_impl},
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
 };
@@ -61,8 +63,8 @@ use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo};
 use polkadot_primitives::{
 	slashing,
 	vstaging::{
-		CandidateEvent, CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState,
-		ScrapedOnChainVotes,
+		async_backing::Constraints, CandidateEvent,
+		CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes,
 	},
 	AccountId, AccountIndex, Balance, BlockNumber, CandidateHash, CoreIndex, DisputeState,
 	ExecutorParams, GroupRotationInfo, Hash as HashT, Id as ParaId, InboundDownwardMessage,
@@ -932,7 +934,7 @@ sp_api::impl_runtime_apis! {
 		}
 	}
 
-	#[api_version(11)]
+	#[api_version(12)]
 	impl polkadot_primitives::runtime_api::ParachainHost<Block> for Runtime {
 		fn validators() -> Vec<ValidatorId> {
 			runtime_impl::validators::<Runtime>()
@@ -1072,6 +1074,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn async_backing_params() -> polkadot_primitives::AsyncBackingParams {
+			#[allow(deprecated)]
 			runtime_impl::async_backing_params::<Runtime>()
 		}
 
@@ -1094,6 +1097,14 @@ sp_api::impl_runtime_apis! {
 		fn candidates_pending_availability(para_id: ParaId) -> Vec<CommittedCandidateReceipt<Hash>> {
 			runtime_impl::candidates_pending_availability::<Runtime>(para_id)
 		}
+
+		fn backing_constraints(para_id: ParaId) -> Option<Constraints> {
+			staging_runtime_impl::backing_constraints::<Runtime>(para_id)
+		}
+
+		fn scheduling_lookahead() -> u32 {
+			staging_runtime_impl::scheduling_lookahead::<Runtime>()
+		}
 	}
 
 	impl sp_consensus_beefy::BeefyApi<Block, BeefyId> for Runtime {
diff --git a/polkadot/runtime/westend/src/genesis_config_presets.rs b/polkadot/runtime/westend/src/genesis_config_presets.rs
index 729df20b3c65e9cd45ac1d5a46ba25251fe9ad1c..76c0ce015c0d8a8fb3106f06cb570cbdf4bd8608 100644
--- a/polkadot/runtime/westend/src/genesis_config_presets.rs
+++ b/polkadot/runtime/westend/src/genesis_config_presets.rs
@@ -128,8 +128,8 @@ fn default_parachains_host_configuration(
 		zeroth_delay_tranche_width: 0,
 		minimum_validation_upgrade_delay: 5,
 		async_backing_params: AsyncBackingParams {
-			max_candidate_depth: 3,
-			allowed_ancestry_len: 2,
+			max_candidate_depth: 0,
+			allowed_ancestry_len: 0,
 		},
 		node_features: bitvec::vec::BitVec::from_element(
 			1u8 << (FeatureIndex::ElasticScalingMVP as usize) |
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 4d5b56bcd911ad67e9d986f7567b5a119ce72c82..51eede7c7342ee1976c0c12cb69d9d395602cd65 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -85,7 +85,7 @@ use polkadot_runtime_parachains::{
 	origin as parachains_origin, paras as parachains_paras,
 	paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points,
 	runtime_api_impl::{
-		v11 as parachains_runtime_api_impl, vstaging as parachains_runtime_vstaging_api_impl,
+		v11 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl,
 	},
 	scheduler as parachains_scheduler, session_info as parachains_session_info,
 	shared as parachains_shared,
@@ -2162,6 +2162,7 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn async_backing_params() -> polkadot_primitives::AsyncBackingParams {
+			#[allow(deprecated)]
 			parachains_runtime_api_impl::async_backing_params::<Runtime>()
 		}
 
@@ -2186,7 +2187,11 @@ sp_api::impl_runtime_apis! {
 		}
 
 		fn backing_constraints(para_id: ParaId) -> Option<Constraints> {
-			parachains_runtime_vstaging_api_impl::backing_constraints::<Runtime>(para_id)
+			parachains_staging_runtime_api_impl::backing_constraints::<Runtime>(para_id)
+		}
+
+		fn scheduling_lookahead() -> u32 {
+			parachains_staging_runtime_api_impl::scheduling_lookahead::<Runtime>()
 		}
 	}
 
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs
index 42aa83d9da7a22c2e21b9020cade89cf7c89c8ca..1bf972750d67f238ea8ccac8dffc0e6f796d6375 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs
@@ -41,10 +41,6 @@ async fn basic_3cores_test() -> Result<(), anyhow::Error> {
 								"num_cores": 2,
 								"max_validators_per_core": 1
 							},
-							"async_backing_params": {
-								"max_candidate_depth": 6,
-								"allowed_ancestry_len": 2
-							}
 						}
 					}
 				}))
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
index e65029d7095cb224ad9ef03b3013d3ff3f30b2eb..37b36efec772577ab613b3f67f7d45bb9ec0f713 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs
@@ -40,11 +40,7 @@ async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> {
 						"config": {
 							"scheduler_params": {
 								"num_cores": 1,
-								"max_validators_per_core": 2
-							},
-							"async_backing_params": {
-								"max_candidate_depth": 6,
-								"allowed_ancestry_len": 2
+								"max_validators_per_core": 2,
 							}
 						}
 					}
diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
index aa9f41320135defd9a85e9ba3c5dafa623a3187b..aa1e54d7da5d98c02dbbc74eb8149f204212744c 100644
--- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
+++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs
@@ -41,10 +41,6 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> {
 								// Num cores is 4, because 2 extra will be added automatically when registering the paras.
 								"num_cores": 4,
 								"max_validators_per_core": 2
-							},
-							"async_backing_params": {
-								"max_candidate_depth": 6,
-								"allowed_ancestry_len": 2
 							}
 						}
 					}
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs
index 14f86eb130f78e24796db6280f17fcb430d604ae..1f8c2aeff1c23ad2c22ac00a0ca9a7b45c1d45ba 100644
--- a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs
+++ b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs
@@ -30,11 +30,8 @@ async fn async_backing_6_seconds_rate_test() -> Result<(), anyhow::Error> {
 					"configuration": {
 						"config": {
 							"scheduler_params": {
-								"group_rotation_frequency": 4,
-								"lookahead": 2,
-								"max_candidate_depth": 3,
-								"allowed_ancestry_len": 2,
-							},
+								"group_rotation_frequency": 4
+							}
 						}
 					}
 				}))
diff --git a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml
index fed30e0db05321631fdce66da858e1431ded64dd..c6545e476a64d22b242f2c4e238a3e7ba5df7ec0 100644
--- a/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml
+++ b/polkadot/zombienet_tests/functional/0015-coretime-shared-core.toml
@@ -1,13 +1,8 @@
 [settings]
 timeout = 1000
 
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 3
-  allowed_ancestry_len = 2
-
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
   max_validators_per_core = 1
-  lookahead = 2
   num_cores = 4
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params]
diff --git a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml
index d3ff0000224279476bd7715ee87318ac3e89012b..050c1f01923bc07ecd80271bbf9f1ab98fd754b2 100644
--- a/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml
+++ b/polkadot/zombienet_tests/functional/0018-shared-core-idle-parachain.toml
@@ -3,7 +3,6 @@ timeout = 1000
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
   max_validators_per_core = 2
-  lookahead = 2
   num_cores = 4
   group_rotation_frequency = 4
 
diff --git a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml
index 43f3ef8f9e559a3266dc274c4dca8854e5704db8..f9028b930cfec9b32b54185f4097867beb9b02cf 100644
--- a/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml
+++ b/polkadot/zombienet_tests/functional/0019-coretime-collation-fetching-fairness.toml
@@ -1,14 +1,9 @@
 [settings]
 timeout = 1000
 
-[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
-  max_candidate_depth = 3
-  allowed_ancestry_len = 2
-
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
   max_validators_per_core = 4
   num_cores = 1
-  lookahead = 2
 
 [relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params]
   needed_approvals = 3
diff --git a/prdoc/pr_7254.prdoc b/prdoc/pr_7254.prdoc
new file mode 100644
index 0000000000000000000000000000000000000000..a6a6cc5f1ef535bfed37f840f87eb41d478f14d5
--- /dev/null
+++ b/prdoc/pr_7254.prdoc
@@ -0,0 +1,58 @@
+title: Deprecate AsyncBackingParams
+doc:
+
+- audience: [Node Dev, Runtime Dev]
+  description: |-
+    Removes all usage of the static async backing params, replacing them with dynamically computed equivalent values (based on the claim queue and scheduling lookahead).
+
+    Adds a new runtime API for querying the scheduling lookahead value. If the API is not present, the node falls back to 3, a default that is backwards compatible with the `allowed_ancestry_len` values used on production networks.
+
+    Also removes most of the code that handled async backing not yet being enabled, including support for collation protocol version 1 on collators, which only worked for leaves that do not support async backing (of which there are none).
+
+crates:
+- name: cumulus-relay-chain-minimal-node
+  bump: minor
+- name: cumulus-relay-chain-rpc-interface
+  bump: minor
+- name: polkadot-node-core-candidate-validation
+  bump: minor
+- name: polkadot-node-core-prospective-parachains
+  bump: minor
+- name: polkadot-node-core-provisioner
+  bump: minor
+- name: polkadot-node-core-runtime-api
+  bump: minor
+- name: polkadot-collator-protocol
+  bump: major
+- name: polkadot-overseer
+  bump: major
+- name: polkadot-node-subsystem-types
+  bump: major
+- name: polkadot-node-subsystem-util
+  bump: major
+- name: polkadot-primitives
+  bump: minor
+- name: polkadot-runtime-parachains
+  bump: minor
+- name: rococo-runtime
+  bump: minor
+- name: westend-runtime
+  bump: minor
+- name: cumulus-client-consensus-aura
+  bump: minor
+- name: cumulus-relay-chain-inprocess-interface
+  bump: minor
+- name: cumulus-relay-chain-interface
+  bump: major
+- name: polkadot-statement-distribution
+  bump: major
+- name: polkadot
+  bump: none
+- name: polkadot-service
+  bump: minor
+- name: cumulus-client-consensus-common
+  bump: minor
+- name: cumulus-client-network
+  bump: minor
+- name: cumulus-client-pov-recovery
+  bump: minor